base: Refactor mmap to use builder pattern.
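
Collapse the MemoryMapping constructor family (new, from_descriptor,
from_descriptor_offset, the *_protection and *_populate variants, and
the unsafe fixed-address variants) into a single MemoryMappingBuilder.
A sketch of a typical call-site migration, mirroring the changes below
(the file, size, and offset names are just placeholders):

    // Before
    let mmap = MemoryMapping::from_descriptor_offset(&file, size, offset)?;

    // After
    let mmap = MemoryMappingBuilder::new(size)
        .from_descriptor(&file)
        .offset(offset)
        .build()?;

Protection, pre-population, and fixed-address mappings are requested
through .protection(), .populate(), and the unsafe build_fixed()
instead of dedicated constructors.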

BUG=b:165423256
TEST=./build_test

Change-Id: Ia67c1a7fe29b66c9cab38476eecde8c25a55617b
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2442569
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Michael Hoyle <mikehoyle@google.com>
Author:    Michael Hoyle <mikehoyle@google.com>
Date:      2020-09-30 23:51:39 -07:00
Committer: Commit Bot
Commit:    3038e40a6b (parent 00c392ae47)

14 changed files with 244 additions and 146 deletions

@@ -7,7 +7,7 @@ use std::fs::OpenOptions;
use std::io;
use crate::Pstore;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use hypervisor::Vm;
use resources::SystemAllocator;
use resources::{Alloc, MmioType};
@@ -62,8 +62,10 @@ pub fn create_memory_region(
.allocate(pstore.size as u64, Alloc::Pstore, "pstore".to_owned())
.map_err(Error::ResourcesError)?;
let memory_mapping =
MemoryMapping::from_descriptor(&file, pstore.size as usize).map_err(Error::MmapError)?;
let memory_mapping = MemoryMappingBuilder::new(pstore.size as usize)
.from_descriptor(&file)
.build()
.map_err(Error::MmapError)?;
vm.add_memory_region(
GuestAddress(address),

@@ -10,7 +10,7 @@ mod shm;
mod timer;
pub use event::{Event, EventReadResult, ScopedEvent};
pub use mmap::MemoryMapping;
pub use mmap::{MemoryMapping, MemoryMappingBuilder};
pub use shm::{SharedMemory, Unix as SharedMemoryUnix};
pub use sys_util::ioctl::*;
pub use sys_util::sched::*;

@@ -14,71 +14,6 @@ pub type Result<T> = std::result::Result<T, MmapError>;
#[derive(Debug)]
pub struct MemoryMapping(SysUtilMmap);
impl MemoryMapping {
pub fn new(size: usize) -> Result<MemoryMapping> {
SysUtilMmap::new(size).map(|mmap| MemoryMapping(mmap))
}
pub fn from_descriptor(descriptor: &dyn AsRawDescriptor, size: usize) -> Result<MemoryMapping> {
SysUtilMmap::from_fd(&wrap_descriptor(descriptor), size).map(|mmap| MemoryMapping(mmap))
}
pub fn from_descriptor_offset(
descriptor: &dyn AsRawDescriptor,
size: usize,
offset: u64,
) -> Result<MemoryMapping> {
SysUtilMmap::from_fd_offset(&wrap_descriptor(descriptor), size, offset)
.map(|mmap| MemoryMapping(mmap))
}
pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
SysUtilMmap::new_protection(size, prot).map(|mmap| MemoryMapping(mmap))
}
pub fn from_descriptor_offset_protection(
descriptor: &dyn AsRawDescriptor,
size: usize,
offset: u64,
prot: Protection,
) -> Result<MemoryMapping> {
SysUtilMmap::from_fd_offset_protection(&wrap_descriptor(descriptor), size, offset, prot)
.map(|mmap| MemoryMapping(mmap))
}
pub unsafe fn new_protection_fixed(
addr: *mut u8,
size: usize,
prot: Protection,
) -> Result<MemoryMapping> {
SysUtilMmap::new_protection_fixed(addr, size, prot).map(|mmap| MemoryMapping(mmap))
}
pub unsafe fn from_descriptor_offset_protection_fixed(
addr: *mut u8,
descriptor: &dyn AsRawDescriptor,
size: usize,
offset: u64,
prot: Protection,
) -> Result<MemoryMapping> {
SysUtilMmap::from_fd_offset_protection_fixed(
addr,
&wrap_descriptor(descriptor),
size,
offset,
prot,
)
.map(|mmap| MemoryMapping(mmap))
}
pub fn from_descriptor_offset_populate(
descriptor: &dyn AsRawDescriptor,
size: usize,
offset: u64,
) -> Result<MemoryMapping> {
SysUtilMmap::from_fd_offset_populate(&wrap_descriptor(descriptor), size, offset)
.map(|mmap| MemoryMapping(mmap))
}
pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
self.0.write_slice(buf, offset)
}
@@ -124,6 +59,121 @@ impl MemoryMapping {
}
}
pub struct MemoryMappingBuilder<'a> {
descriptor: Option<&'a dyn AsRawDescriptor>,
size: usize,
offset: Option<u64>,
protection: Option<Protection>,
populate: bool,
}
/// Builds a MemoryMapping object from the specified arguments.
impl<'a> MemoryMappingBuilder<'a> {
/// Creates a new builder specifying size of the memory region in bytes.
pub fn new(size: usize) -> MemoryMappingBuilder<'a> {
MemoryMappingBuilder {
descriptor: None,
size,
offset: None,
protection: None,
populate: false,
}
}
/// Build the memory mapping given the specified descriptor to mapped memory
///
/// Default: Create a new memory mapping.
pub fn from_descriptor(mut self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder {
self.descriptor = Some(descriptor);
self
}
/// Offset in bytes from the beginning of the mapping to start the mmap.
///
/// Default: No offset
pub fn offset(mut self, offset: u64) -> MemoryMappingBuilder<'a> {
self.offset = Some(offset);
self
}
/// Protection (e.g. readable/writable) of the memory region.
///
/// Default: Read/write
pub fn protection(mut self, protection: Protection) -> MemoryMappingBuilder<'a> {
self.protection = Some(protection);
self
}
/// Request that the mapped pages are pre-populated
///
/// Default: Do not populate
pub fn populate(mut self) -> MemoryMappingBuilder<'a> {
self.populate = true;
self
}
/// Build a MemoryMapping from the provided options.
pub fn build(self) -> Result<MemoryMapping> {
match self.descriptor {
None => {
if self.populate {
// Population not supported for new mmaps
return Err(MmapError::InvalidArgument);
}
MemoryMappingBuilder::wrap(SysUtilMmap::new_protection(
self.size,
self.protection.unwrap_or(Protection::read_write()),
))
}
Some(descriptor) => {
MemoryMappingBuilder::wrap(SysUtilMmap::from_fd_offset_protection_populate(
&wrap_descriptor(descriptor),
self.size,
self.offset.unwrap_or(0),
self.protection.unwrap_or(Protection::read_write()),
self.populate,
))
}
}
}
/// Build a MemoryMapping from the provided options at a fixed address. Note this
/// is a separate function from build in order to isolate unsafe behavior.
///
/// # Safety
///
/// Function should not be called before the caller unmaps any mmap'd regions already
/// present at `(addr..addr+size)`. If another MemoryMapping object holds the same
/// address space, the destructors of those objects will conflict and the space could
/// be unmapped while still in use.
pub unsafe fn build_fixed(self, addr: *mut u8) -> Result<MemoryMapping> {
if self.populate {
// Population not supported for fixed mapping.
return Err(MmapError::InvalidArgument);
}
match self.descriptor {
None => MemoryMappingBuilder::wrap(SysUtilMmap::new_protection_fixed(
addr,
self.size,
self.protection.unwrap_or(Protection::read_write()),
)),
Some(descriptor) => {
MemoryMappingBuilder::wrap(SysUtilMmap::from_fd_offset_protection_fixed(
addr,
&wrap_descriptor(descriptor),
self.size,
self.offset.unwrap_or(0),
self.protection.unwrap_or(Protection::read_write()),
))
}
}
}
fn wrap(result: Result<SysUtilMmap>) -> Result<MemoryMapping> {
result.map(|mmap| MemoryMapping(mmap))
}
}
impl VolatileMemory for MemoryMapping {
fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
self.0.get_slice(offset, count)

@@ -6,7 +6,7 @@ use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::Arc;
use std::u32;
use base::{error, Event, MappedRegion, MemoryMapping};
use base::{error, Event, MappedRegion, MemoryMapping, MemoryMappingBuilder};
use hypervisor::Datamatch;
use msg_socket::{MsgReceiver, MsgSender};
use resources::{Alloc, MmioType, SystemAllocator};
@@ -728,11 +728,11 @@ impl VfioPciDevice {
// Even if vm has mapped this region, but it is in vm main process,
// device process doesn't has this mapping, but vfio_dma_map() need it
// in device process, so here map it again.
let mmap = match MemoryMapping::from_descriptor_offset(
self.device.as_ref(),
mmap_size as usize,
offset,
) {
let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
.from_descriptor(self.device.as_ref())
.offset(offset)
.build()
{
Ok(v) => v,
Err(_e) => break,
};

@@ -21,7 +21,9 @@ use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::ptr::{null, null_mut};
use base::{round_up_to_page_size, AsRawDescriptor, MemoryMapping, SharedMemory};
use base::{
round_up_to_page_size, AsRawDescriptor, MemoryMapping, MemoryMappingBuilder, SharedMemory,
};
use data_model::VolatileMemory;
const BUFFER_COUNT: usize = 3;
@@ -206,7 +208,10 @@ impl DisplayT for DisplayWl {
let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
let buffer_shm = SharedMemory::named("GpuDisplaySurface", buffer_size as u64)
.map_err(GpuDisplayError::CreateShm)?;
let buffer_mem = MemoryMapping::from_descriptor(&buffer_shm, buffer_size).unwrap();
let buffer_mem = MemoryMappingBuilder::new(buffer_size)
.from_descriptor(&buffer_shm)
.build()
.unwrap();
// Safe because only a valid context, parent pointer (if not None), and buffer FD are used.
// The returned surface is checked for validity before being filed away.

@@ -30,7 +30,8 @@ use libc::{
use base::{
block_signal, errno_result, error, ioctl, ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val,
pagesize, signal, unblock_signal, AsRawDescriptor, Error, Event, FromRawDescriptor,
MappedRegion, MemoryMapping, MmapError, RawDescriptor, Result, SafeDescriptor,
MappedRegion, MemoryMapping, MemoryMappingBuilder, MmapError, RawDescriptor, Result,
SafeDescriptor,
};
use data_model::vec_with_array_field;
use kvm_sys::*;
@@ -226,8 +227,10 @@ impl KvmVm {
// the value of the fd and we own the fd.
let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) };
let run_mmap =
MemoryMapping::from_descriptor(&vcpu, run_mmap_size).map_err(|_| Error::new(ENOSPC))?;
let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
.from_descriptor(&vcpu)
.build()
.map_err(|_| Error::new(ENOSPC))?;
Ok(KvmVcpu {
vm: self.vm.try_clone()?,
@@ -607,7 +610,9 @@ impl Vcpu for KvmVcpu {
fn try_clone(&self) -> Result<Self> {
let vm = self.vm.try_clone()?;
let vcpu = self.vcpu.try_clone()?;
let run_mmap = MemoryMapping::from_descriptor(&vcpu, self.run_mmap.size())
let run_mmap = MemoryMappingBuilder::new(self.run_mmap.size())
.from_descriptor(&vcpu)
.build()
.map_err(|_| Error::new(ENOSPC))?;
let vcpu_run_handle_fingerprint = self.vcpu_run_handle_fingerprint.clone();
@@ -1104,7 +1109,7 @@ impl From<&MPState> for kvm_mp_state {
#[cfg(test)]
mod tests {
use super::*;
use base::{pagesize, MemoryMapping, MemoryMappingArena};
use base::{pagesize, MemoryMappingArena, MemoryMappingBuilder};
use std::os::unix::io::FromRawFd;
use std::thread;
use vm_memory::GuestAddress;
@@ -1194,10 +1199,10 @@ mod tests {
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false)
.unwrap();
}
@@ -1208,7 +1213,7 @@ mod tests {
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false)
.unwrap();
}
@@ -1219,7 +1224,7 @@ mod tests {
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
let mem_ptr = mem.as_ptr();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
@@ -1243,7 +1248,7 @@ mod tests {
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm).unwrap();
let mem_size = 0x2000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
assert!(vm
.add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false)
.is_err());

@@ -14,7 +14,7 @@ use std::pin::Pin;
use std::ptr::null_mut;
use std::sync::atomic::{AtomicU32, Ordering};
use base::{MappedRegion, MemoryMapping, WatchingEvents};
use base::{MappedRegion, MemoryMapping, MemoryMappingBuilder, WatchingEvents};
use crate::bindings::*;
use crate::syscalls::*;
@@ -121,34 +121,40 @@ impl URingContext {
// Safe because we trust the kernel to set valid sizes in `io_uring_setup` and any error
// is checked.
let submit_ring = SubmitQueueState::new(
MemoryMapping::from_descriptor_offset_populate(
&ring_file,
MemoryMappingBuilder::new(
ring_params.sq_off.array as usize
+ ring_params.sq_entries as usize * std::mem::size_of::<u32>(),
u64::from(IORING_OFF_SQ_RING),
)
.from_descriptor(&ring_file)
.offset(u64::from(IORING_OFF_SQ_RING))
.populate()
.build()
.map_err(Error::MappingSubmitRing)?,
&ring_params,
);
let num_sqe = ring_params.sq_entries as usize;
let submit_queue_entries = SubmitQueueEntries {
mmap: MemoryMapping::from_descriptor_offset_populate(
&ring_file,
mmap: MemoryMappingBuilder::new(
ring_params.sq_entries as usize * std::mem::size_of::<io_uring_sqe>(),
u64::from(IORING_OFF_SQES),
)
.from_descriptor(&ring_file)
.offset(u64::from(IORING_OFF_SQES))
.populate()
.build()
.map_err(Error::MappingSubmitEntries)?,
len: num_sqe,
};
let complete_ring = CompleteQueueState::new(
MemoryMapping::from_descriptor_offset_populate(
&ring_file,
MemoryMappingBuilder::new(
ring_params.cq_off.cqes as usize
+ ring_params.cq_entries as usize * std::mem::size_of::<io_uring_cqe>(),
u64::from(IORING_OFF_CQ_RING),
)
.from_descriptor(&ring_file)
.offset(u64::from(IORING_OFF_CQ_RING))
.populate()
.build()
.map_err(Error::MappingCompleteRing)?,
&ring_params,
);

@@ -33,7 +33,7 @@ use kvm_sys::*;
use base::{
block_signal, ioctl, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref,
ioctl_with_val, pagesize, signal, unblock_signal, warn, Error, Event, IoctlNr, MappedRegion,
MemoryMapping, MmapError, Result, SIGRTMIN,
MemoryMapping, MemoryMappingBuilder, MmapError, Result, SIGRTMIN,
};
use msg_socket::MsgOnSocket;
use vm_memory::{GuestAddress, GuestMemory};
@@ -950,8 +950,10 @@ impl Vcpu {
// the value of the fd and we own the fd.
let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };
let run_mmap =
MemoryMapping::from_descriptor(&vcpu, run_mmap_size).map_err(|_| Error::new(ENOSPC))?;
let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
.from_descriptor(&vcpu)
.build()
.map_err(|_| Error::new(ENOSPC))?;
Ok(Vcpu { vcpu, run_mmap })
}
@@ -1753,10 +1755,10 @@ mod tests {
.unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false)
.unwrap();
}
@@ -1767,7 +1769,7 @@ mod tests {
let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false)
.unwrap();
}
@@ -1778,7 +1780,7 @@ mod tests {
let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
let mem_ptr = mem.as_ptr();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
@@ -1802,7 +1804,7 @@ mod tests {
let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x2000;
let mem = MemoryMapping::new(mem_size).unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
assert!(vm
.add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false)
.is_err());

@@ -4,7 +4,7 @@
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMapping, SharedMemory};
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
@@ -20,7 +20,9 @@ fn test_run() {
let load_addr = GuestAddress(0x1000);
let guest_mem = GuestMemory::new(&[]).unwrap();
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMapping::from_descriptor(&mem, mem_size as usize)
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_descriptor(&mem)
.build()
.expect("failed to create memory mapping");
mmap.write_slice(&code[..], load_addr.offset() as usize)
@@ -45,7 +47,9 @@ fn test_run() {
.add_memory_region(
GuestAddress(0),
Box::new(
MemoryMapping::from_descriptor(&mem, mem_size as usize)
MemoryMappingBuilder::new(mem_size as usize)
.from_descriptor(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,

@@ -4,7 +4,7 @@
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMapping, SharedMemory};
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
@@ -22,7 +22,9 @@ fn test_run() {
let load_addr = GuestAddress(0x1000);
let guest_mem = GuestMemory::new(&[]).unwrap();
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMapping::from_descriptor(&mem, mem_size as usize)
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_descriptor(&mem)
.build()
.expect("failed to create memory mapping");
mmap.write_slice(&code[..], load_addr.offset() as usize)
@@ -47,7 +49,9 @@ fn test_run() {
vm.add_memory_region(
GuestAddress(0),
Box::new(
MemoryMapping::from_descriptor(&mem, mem_size as usize)
MemoryMappingBuilder::new(mem_size as usize)
.from_descriptor(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,
@@ -58,15 +62,19 @@ fn test_run() {
// Give some read only memory for the test code to read from and force a vcpu exit when it reads
// from it.
let mem_ro = SharedMemory::anon(0x1000).expect("failed to create shared memory");
let mmap_ro =
MemoryMapping::from_descriptor(&mem_ro, 0x1000).expect("failed to create memory mapping");
let mmap_ro = MemoryMappingBuilder::new(0x1000)
.from_descriptor(&mem_ro)
.build()
.expect("failed to create memory mapping");
mmap_ro
.write_obj(vcpu_regs.rax as u8, 0)
.expect("failed writing data to ro memory");
vm.add_memory_region(
GuestAddress(vcpu_sregs.es.base),
Box::new(
MemoryMapping::from_descriptor(&mem_ro, 0x1000)
MemoryMappingBuilder::new(0x1000)
.from_descriptor(&mem_ro)
.build()
.expect("failed to create memory mapping"),
),
true,

@@ -21,8 +21,8 @@ use libc::{pid_t, waitpid, EINVAL, ENODATA, ENOTTY, WEXITSTATUS, WIFEXITED, WNOH
use protobuf::Message;
use base::{
error, Error as SysError, Event, Killable, MemoryMapping, Result as SysResult, ScmSocket,
SharedMemory, SharedMemoryUnix, SIGRTMIN,
error, Error as SysError, Event, Killable, MemoryMappingBuilder, Result as SysResult,
ScmSocket, SharedMemory, SharedMemoryUnix, SIGRTMIN,
};
use kvm::{dirty_log_bitmap_size, Datamatch, IoeventAddress, IrqRoute, IrqSource, PicId, Vm};
use kvm_sys::{kvm_clock_data, kvm_ioapic_state, kvm_pic_state, kvm_pit_state2};
@@ -360,7 +360,10 @@ impl Process {
None => return Err(SysError::new(EOVERFLOW)),
_ => {}
}
let mem = MemoryMapping::from_descriptor_offset(&shm, length as usize, offset)
let mem = MemoryMappingBuilder::new(length as usize)
.from_descriptor(&shm)
.offset(offset)
.build()
.map_err(mmap_to_sys_err)?;
let slot =
vm.add_memory_region(GuestAddress(start), Box::new(mem), read_only, dirty_log)?;

@@ -23,6 +23,8 @@ use crate::{errno, pagesize};
pub enum Error {
/// Requested memory out of range.
InvalidAddress,
/// Invalid argument provided when building mmap.
InvalidArgument,
/// Requested offset is out of range of `libc::off_t`.
InvalidOffset,
/// Requested mapping is not page aligned
@@ -44,6 +46,7 @@ impl Display for Error {
match self {
InvalidAddress => write!(f, "requested memory out of range"),
InvalidArgument => write!(f, "invalid argument provided when creating mapping"),
InvalidOffset => write!(f, "requested offset is out of range of off_t"),
NotPageAligned => write!(f, "requested memory is not page aligned"),
InvalidRange(offset, count, region_size) => write!(
@@ -212,26 +215,6 @@ impl MemoryMapping {
MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
}
/// Maps `size` bytes starting at `offset` from the given `fd` as read/write, and requests
/// that the pages are pre-populated.
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
pub fn from_fd_offset_populate(
fd: &dyn AsRawFd,
size: usize,
offset: u64,
) -> Result<MemoryMapping> {
MemoryMapping::from_fd_offset_flags(
fd,
size,
offset,
libc::MAP_SHARED | libc::MAP_POPULATE,
Protection::read_write(),
)
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd` as read/write.
///
/// # Arguments
@@ -270,6 +253,26 @@ impl MemoryMapping {
MemoryMapping::from_fd_offset_flags(fd, size, offset, libc::MAP_SHARED, prot)
}
/// Maps `size` bytes starting at `offset` from the given `fd` as read/write, and requests
/// that the pages are pre-populated.
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
pub fn from_fd_offset_protection_populate(
fd: &dyn AsRawFd,
size: usize,
offset: u64,
prot: Protection,
populate: bool,
) -> Result<MemoryMapping> {
let mut flags = libc::MAP_SHARED;
if populate {
flags |= libc::MAP_POPULATE;
}
MemoryMapping::from_fd_offset_flags(fd, size, offset, flags, prot)
}
/// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
///
/// # Arguments

@@ -20,8 +20,8 @@ use std::sync::Arc;
use libc::{EINVAL, EIO, ENODEV};
use base::{
error, AsRawDescriptor, Error as SysError, Event, ExternalMapping, MappedRegion, MemoryMapping,
MmapError, RawDescriptor, Result,
error, AsRawDescriptor, Error as SysError, Event, ExternalMapping, MappedRegion,
MemoryMappingBuilder, MmapError, RawDescriptor, Result,
};
use hypervisor::{IrqRoute, IrqSource, Vm};
use msg_socket::{MsgError, MsgOnSocket, MsgReceiver, MsgResult, MsgSender, MsgSocket};
@@ -377,7 +377,11 @@ impl VmMemoryRequest {
offset,
gpa,
} => {
let mmap = match MemoryMapping::from_descriptor_offset(fd, size, offset as u64) {
let mmap = match MemoryMappingBuilder::new(size)
.from_descriptor(fd)
.offset(offset as u64)
.build()
{
Ok(v) => v,
Err(_e) => return VmMemoryResponse::Err(SysError::new(EINVAL)),
};
@@ -582,7 +586,7 @@ fn register_memory(
size: usize,
pci_allocation: Option<(Alloc, u64)>,
) -> Result<(u64, MemSlot)> {
let mmap = match MemoryMapping::from_descriptor(fd, size) {
let mmap = match MemoryMappingBuilder::new(size).from_descriptor(fd).build() {
Ok(v) => v,
Err(MmapError::SystemCallFailed(e)) => return Err(e),
_ => return Err(SysError::new(EINVAL)),

@@ -15,8 +15,8 @@ use std::sync::Arc;
use crate::guest_address::GuestAddress;
use base::{pagesize, Error as SysError};
use base::{
AsRawDescriptor, MappedRegion, MemfdSeals, MemoryMapping, MmapError, SharedMemory,
SharedMemoryUnix,
AsRawDescriptor, MappedRegion, MemfdSeals, MemoryMapping, MemoryMappingBuilder, MmapError,
SharedMemory, SharedMemoryUnix,
};
use cros_async::{
uring_mem::{self, BorrowedIoVec},
@ -178,7 +178,10 @@ impl GuestMemory {
let size =
usize::try_from(range.1).map_err(|_| Error::MemoryRegionTooLarge(range.1))?;
let mapping = MemoryMapping::from_descriptor_offset(&memfd, size, offset)
let mapping = MemoryMappingBuilder::new(size)
.from_descriptor(&memfd)
.offset(offset)
.build()
.map_err(Error::MemoryMappingFailed)?;
regions.push(MemoryRegion {
mapping,
@@ -851,8 +854,11 @@ mod tests {
.unwrap();
let _ = gm.with_regions::<_, ()>(|index, _, size, _, memfd_offset| {
let mmap =
MemoryMapping::from_descriptor_offset(gm.as_ref(), size, memfd_offset).unwrap();
let mmap = MemoryMappingBuilder::new(size)
.from_descriptor(gm.as_ref())
.offset(memfd_offset)
.build()
.unwrap();
if index == 0 {
assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);