From 3eeaf6a339378da53b3ad25c9f6326b3a7910069 Mon Sep 17 00:00:00 2001
From: Daniel Verkamp
Date: Mon, 30 Aug 2021 15:29:44 -0700
Subject: [PATCH] linux: fill pmem alignment area with anon map

Rather than mapping past the end of the file when using a pmem backing
file that is not 2 MiB aligned, use an anonymous mapping to fill the
remaining part of the arena.

This partially reverts https://crrev.com/c/2153103 while keeping the
effective change: the anonymous mapping used to fill the padding is now
added with the same protection as the file mapping.

Also handle images that are not a multiple of the page size (typically
4096 bytes) - the memory mapping on the host will handle zero-filling
reads and discarding writes past the end of the mapped file as long as
we map a size containing the partial last page.

BUG=chromium:1244217
TEST=Boot crosvm with non-2MB aligned pmem disk; read the last few bytes
TEST=./test_all

Change-Id: Ibe8da170175bb9befce924122b912a28a6dc0e7b
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3131444
Tested-by: kokoro
Reviewed-by: Chirantan Ekbote
Commit-Queue: Daniel Verkamp
---
 src/linux.rs         | 28 +++++++++++++++++++++++-----
 sys_util/src/mmap.rs | 19 +++++++++++++++++--
 2 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/src/linux.rs b/src/linux.rs
index f1355a2939..03f4a91dbe 100644
--- a/src/linux.rs
+++ b/src/linux.rs
@@ -1056,7 +1056,8 @@ fn create_pmem_device(
 ) -> DeviceResult {
     let fd = open_file(&disk.path, disk.read_only, false /*O_DIRECT*/)
         .map_err(|e| Error::Disk(disk.path.clone(), e.into()))?;
-    let arena_size = {
+
+    let (disk_size, arena_size) = {
         let metadata =
             std::fs::metadata(&disk.path).map_err(|e| Error::Disk(disk.path.to_path_buf(), e))?;
         let disk_len = metadata.len();
@@ -1071,9 +1072,12 @@ fn create_pmem_device(
         } else {
             0
         };
-        disk_len
-            .checked_add(align_adjust)
-            .ok_or(Error::PmemDeviceImageTooBig)?
+        (
+            disk_len,
+            disk_len
+                .checked_add(align_adjust)
+                .ok_or(Error::PmemDeviceImageTooBig)?,
+        )
     };
 
     let protection = {
@@ -1087,11 +1091,25 @@ fn create_pmem_device(
     let arena = {
         // Conversion from u64 to usize may fail on 32bit system.
         let arena_size = usize::try_from(arena_size).map_err(|_| Error::PmemDeviceImageTooBig)?;
+        let disk_size = usize::try_from(disk_size).map_err(|_| Error::PmemDeviceImageTooBig)?;
 
         let mut arena = MemoryMappingArena::new(arena_size).map_err(Error::ReservePmemMemory)?;
         arena
-            .add_fd_offset_protection(0, arena_size, &fd, 0, protection)
+            .add_fd_offset_protection(0, disk_size, &fd, 0, protection)
             .map_err(Error::ReservePmemMemory)?;
+
+        // If the disk is not a multiple of the page size, the OS will fill the remaining part
+        // of the page with zeroes. However, the anonymous mapping added below must start on a
+        // page boundary, so round up the size before calculating the offset of the anon region.
+        let disk_size = round_up_to_page_size(disk_size);
+
+        if arena_size > disk_size {
+            // Add an anonymous region with the same protection as the disk mapping if the arena
+            // size was aligned.
+            arena
+                .add_anon_protection(disk_size, arena_size - disk_size, protection)
+                .map_err(Error::ReservePmemMemory)?;
+        }
         arena
     };
 
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 28ad07f57a..576ccb410a 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -786,15 +786,30 @@ impl MemoryMappingArena {
         MemoryMapping::new_protection(size, Protection::none().set_read()).map(From::from)
     }
 
+    /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena
+    /// with `prot` protections. `offset` must be page aligned.
+    ///
+    /// # Arguments
+    /// * `offset` - Page aligned offset into the arena in bytes.
+    /// * `size` - Size of memory region in bytes.
+    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
+    pub fn add_anon_protection(
+        &mut self,
+        offset: usize,
+        size: usize,
+        prot: Protection,
+    ) -> Result<()> {
+        self.try_add(offset, size, prot, None)
+    }
+
     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
     /// `offset` must be page aligned.
     ///
     /// # Arguments
     /// * `offset` - Page aligned offset into the arena in bytes.
     /// * `size` - Size of memory region in bytes.
-    /// * `fd` - File descriptor to mmap from.
     pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
-        self.try_add(offset, size, Protection::read_write(), None)
+        self.add_anon_protection(offset, size, Protection::read_write())
     }
 
     /// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
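
For reference, a minimal usage sketch of the new add_anon_protection() API next to
add_fd_offset_protection(), mirroring the create_pmem_device() layout in the diff above.
The function name and its arguments are illustrative only, and it assumes that
MemoryMappingArena, Protection, and round_up_to_page_size are re-exported from sys_util;
everything else it calls appears in this patch.

use std::fs::File;

use sys_util::{round_up_to_page_size, MemoryMappingArena, Protection};

// Sketch (hypothetical helper): reserve an arena of `arena_size` bytes, map the
// first `disk_size` bytes from the backing file, and pad the rest anonymously.
// `arena_size` is assumed to be `disk_size` rounded up to the pmem alignment (2 MiB).
fn map_disk_with_padding(disk: &File, disk_size: usize, arena_size: usize) -> MemoryMappingArena {
    let prot = Protection::read_write();
    let mut arena = MemoryMappingArena::new(arena_size).expect("reserve arena");

    // Map only the file-backed portion; the host handles zero-filling reads and
    // discarding writes within the partial last page past the end of the file.
    arena
        .add_fd_offset_protection(0, disk_size, disk, 0, prot)
        .expect("map disk image");

    // The anonymous padding must start on a page boundary, so round the file
    // size up before computing the offset and length of the anon region.
    let file_end = round_up_to_page_size(disk_size);
    if arena_size > file_end {
        // Use the same protection as the file mapping for the padding region.
        arena
            .add_anon_protection(file_end, arena_size - file_end, prot)
            .expect("map alignment padding");
    }
    arena
}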