sys_util: add MemoryMappingArena

There is a hard limit to the number of memory maps that can be added to a
KVM VM, an arch-dependent number defined as KVM_USER_MEM_SLOTS. For
example, on x86 this is 509 (512 - 3 internal slots).

For most purposes this isn't an issue, but there are cases where one
might want to share a lot of mmaps with a guest. For example, virtio-fs
uses a large cache region for mapping slices of file fds directly into
guest memory. If one tries to add a new KVM memory region for each mmap,
the available slots are quickly exhausted.

MemoryMappingArena works around this limitation by allocating a single
KVM memory region for a large slice of memory, and then using mmap with
MAP_FIXED to remap slices of this "arena" on the host side, achieving
the same effect without quickly exhausting the KVM memory region slots.
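
As a rough sketch of how the new host-side API fits together (not part of
this change's tests; the file path is made up and errors are simply
unwrapped):

    use std::fs::File;
    use sys_util::{pagesize, MemoryMappingArena};

    fn arena_example() {
        // One arena occupies a single KVM memory slot, no matter how many
        // mappings are later placed inside it.
        let mut arena = MemoryMappingArena::new(8 * pagesize()).unwrap();

        // Back the first page with anonymous memory.
        arena.add_anon(0, pagesize()).unwrap();

        // Map two pages of a file directly over the next slice of the arena.
        let file = File::open("/tmp/some_cached_file").unwrap();
        arena
            .add_fd_offset(pagesize(), 2 * pagesize(), &file, 0)
            .unwrap();

        // Flush the file-backed slice, then swap it back out for an anonymous
        // placeholder; the arena's single KVM slot is unaffected throughout.
        arena.msync(pagesize()).unwrap();
        arena.remove(pagesize()).unwrap();
    }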

BUG=chromium:936567
TEST=cargo test -p sys_util

Change-Id: I89cc3b22cdba6756b2d76689176d7147cf238f07
Reviewed-on: https://chromium-review.googlesource.com/1546600
Commit-Ready: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Author: Daniel Prilik
Date: 2019-03-29 10:48:57 -07:00 (committed by chrome-bot)
Parent: c211a6ccc6
Commit: d49adc9005
2 changed files with 549 additions and 67 deletions

@@ -13,7 +13,6 @@ extern crate msg_socket;
mod cap;
use std::cmp::{min, Ordering};
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap};
use std::fs::File;
use std::mem::size_of;
@@ -32,7 +31,8 @@ use sys_util::{
ioctl, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref, ioctl_with_val,
};
use sys_util::{
pagesize, signal, Error, EventFd, GuestAddress, GuestMemory, MemoryMapping, Result,
pagesize, signal, Error, EventFd, GuestAddress, GuestMemory, MemoryMapping, MemoryMappingArena,
Result,
};
pub use crate::cap::*;
@@ -79,7 +79,7 @@ unsafe fn set_user_memory_region<F: AsRawFd>(
log_dirty_pages: bool,
guest_addr: u64,
memory_size: u64,
userspace_addr: u64,
userspace_addr: *mut u8,
) -> Result<()> {
let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
if log_dirty_pages {
@@ -90,7 +90,7 @@ unsafe fn set_user_memory_region<F: AsRawFd>(
flags,
guest_phys_addr: guest_addr,
memory_size,
userspace_addr,
userspace_addr: userspace_addr as u64,
};
let ret = ioctl_with_ref(fd, KVM_SET_USER_MEMORY_REGION(), &region);
@@ -300,6 +300,7 @@ pub struct Vm {
vm: File,
guest_mem: GuestMemory,
device_memory: HashMap<u32, MemoryMapping>,
mmap_arenas: HashMap<u32, MemoryMappingArena>,
mem_slot_gaps: BinaryHeap<MemSlot>,
}
@@ -322,7 +323,7 @@ impl Vm {
false,
guest_addr.offset() as u64,
size as u64,
host_addr as u64,
host_addr as *mut u8,
)
}
})?;
@@ -331,6 +332,7 @@ impl Vm {
vm: vm_file,
guest_mem,
device_memory: HashMap::new(),
mmap_arenas: HashMap::new(),
mem_slot_gaps: BinaryHeap::new(),
})
} else {
@@ -338,6 +340,49 @@ impl Vm {
}
}
// Helper method for `set_user_memory_region` that tracks available slots.
unsafe fn set_user_memory_region(
&mut self,
read_only: bool,
log_dirty_pages: bool,
guest_addr: u64,
memory_size: u64,
userspace_addr: *mut u8,
) -> Result<u32> {
let slot = match self.mem_slot_gaps.pop() {
Some(gap) => gap.0,
None => {
(self.device_memory.len()
+ self.guest_mem.num_regions() as usize
+ self.mmap_arenas.len()) as u32
}
};
let res = set_user_memory_region(
&self.vm,
slot,
read_only,
log_dirty_pages,
guest_addr,
memory_size,
userspace_addr,
);
match res {
Ok(_) => Ok(slot),
Err(e) => {
self.mem_slot_gaps.push(MemSlot(slot));
Err(e)
}
}
}
// Helper method that removes a memory region and returns its slot to the pool of available gaps.
unsafe fn remove_user_memory_region(&mut self, slot: u32) -> Result<()> {
set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?;
self.mem_slot_gaps.push(MemSlot(slot));
Ok(())
}
/// Checks if a particular `Cap` is available.
///
/// This is distinct from the `Kvm` version of this method because some extensions depend on
@@ -374,29 +419,18 @@ impl Vm {
return Err(Error::new(ENOSPC));
}
// If there are no gaps, the lowest slot number is equal to the number of slots we are
// currently using between guest memory and device memory. For example, if 2 slots are used
// by guest memory, 3 slots are used for device memory, and there are no gaps, it follows
// that the lowest unused slot is 2+3=5.
let slot = match self.mem_slot_gaps.pop() {
Some(gap) => gap.0,
None => (self.device_memory.len() + (self.guest_mem.num_regions() as usize)) as u32,
};
// Safe because we check that the given guest address is valid and has no overlaps. We also
// know that the pointer and size are correct because the MemoryMapping interface ensures
// this. We take ownership of the memory mapping so that it won't be unmapped until the slot
// is removed.
unsafe {
set_user_memory_region(
&self.vm,
slot,
let slot = unsafe {
self.set_user_memory_region(
read_only,
log_dirty_pages,
guest_addr.offset() as u64,
mem.size() as u64,
mem.as_ptr() as u64,
)?;
mem.as_ptr(),
)?
};
self.device_memory.insert(slot, mem);
@@ -407,19 +441,82 @@ impl Vm {
///
/// Ownership of the host memory mapping associated with the given slot is returned on success.
pub fn remove_device_memory(&mut self, slot: u32) -> Result<MemoryMapping> {
match self.device_memory.entry(slot) {
Entry::Occupied(entry) => {
// Safe because the slot is checked against the list of device memory slots.
unsafe {
set_user_memory_region(&self.vm, slot, false, false, 0, 0, 0)?;
}
self.mem_slot_gaps.push(MemSlot(slot));
Ok(entry.remove())
if self.device_memory.contains_key(&slot) {
// Safe because the slot is checked against the list of device memory slots.
unsafe {
self.remove_user_memory_region(slot)?;
}
_ => Err(Error::new(ENOENT)),
// Safe to unwrap since map is checked to contain key
Ok(self.device_memory.remove(&slot).unwrap())
} else {
Err(Error::new(ENOENT))
}
}
/// Inserts the given `MemoryMappingArena` into the VM's address space at `guest_addr`.
///
/// The slot that was assigned the memory mapping arena is returned on success. The slot can be
/// given to `Vm::remove_mmap_arena` to remove the memory from the VM's address space and
/// take back ownership of `mmap_arena`.
///
/// Note that memory inserted into the VM's address space must not overlap with any other memory
/// slot's region.
///
/// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
/// write will trigger a mmio VM exit, leaving the memory untouched.
///
/// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
/// by the guest with `get_dirty_log`.
pub fn add_mmap_arena(
&mut self,
guest_addr: GuestAddress,
mmap_arena: MemoryMappingArena,
read_only: bool,
log_dirty_pages: bool,
) -> Result<u32> {
if guest_addr < self.guest_mem.end_addr() {
return Err(Error::new(ENOSPC));
}
// Safe because we check that the given guest address is valid and has no overlaps. We also
// know that the pointer and size are correct because the MemoryMappingArena interface ensures
// this. We take ownership of the memory mapping so that it won't be unmapped until the slot
// is removed.
let slot = unsafe {
self.set_user_memory_region(
read_only,
log_dirty_pages,
guest_addr.offset() as u64,
mmap_arena.size() as u64,
mmap_arena.as_ptr(),
)?
};
self.mmap_arenas.insert(slot, mmap_arena);
Ok(slot)
}
/// Removes a memory mapping arena that was previously added at the given slot.
///
/// Ownership of the memory mapping arena associated with the given slot is returned on success.
pub fn remove_mmap_arena(&mut self, slot: u32) -> Result<MemoryMappingArena> {
if self.mmap_arenas.contains_key(&slot) {
// Safe because the slot is checked against the list of mmap arena slots.
unsafe {
self.remove_user_memory_region(slot)?;
}
// Safe to unwrap since map is checked to contain key
Ok(self.mmap_arenas.remove(&slot).unwrap())
} else {
Err(Error::new(ENOENT))
}
}
/// Gets a mutable reference to the memory mapping arena added at the given slot.
pub fn get_mmap_arena(&mut self, slot: u32) -> Option<&mut MemoryMappingArena> {
self.mmap_arenas.get_mut(&slot)
}
/// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
/// `slot`.
///

@@ -6,8 +6,10 @@
//! mmap object leaves scope.
use std;
use std::collections::BTreeMap;
use std::fmt::{self, Display};
use std::io::{Read, Write};
use std::mem::ManuallyDrop;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
@@ -15,7 +17,7 @@ use data_model::volatile_memory::*;
use data_model::DataInit;
use libc::{self, c_int};
use crate::errno;
use crate::{errno, pagesize};
#[derive(Debug)]
pub enum Error {
@@ -23,6 +25,10 @@ pub enum Error {
InvalidAddress,
/// Requested offset is out of range of `libc::off_t`.
InvalidOffset,
/// Requested mapping is not page aligned
NotPageAligned,
/// Overlapping regions
Overlapping(usize, usize),
/// Requested memory range spans past the end of the region.
InvalidRange(usize, usize),
/// Couldn't read from the given source.
@@ -43,6 +49,12 @@ impl Display for Error {
match self {
InvalidAddress => write!(f, "requested memory out of range"),
InvalidOffset => write!(f, "requested offset is out of range of off_t"),
NotPageAligned => write!(f, "requested memory is not page aligned"),
Overlapping(offset, count) => write!(
f,
"requested memory range overlaps with existing region: offset={} size={}",
offset, count
),
InvalidRange(offset, count) => write!(
f,
"requested memory range spans past the end of the region: offset={} count={}",
@@ -117,7 +129,7 @@ impl MemoryMapping {
/// # Arguments
/// * `size` - Size of memory region in bytes.
pub fn new(size: usize) -> Result<MemoryMapping> {
Self::new_protection(size, Protection::read_write())
MemoryMapping::new_protection(size, Protection::read_write())
}
/// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
@@ -128,34 +140,18 @@ impl MemoryMapping {
pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
unsafe {
MemoryMapping::try_mmap(
None,
size,
prot.into(),
libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
-1,
0,
None,
)
};
if addr == libc::MAP_FAILED {
return Err(Error::SystemCallFailed(errno::Error::last()));
}
// This is safe because we call madvise with a valid address and size, and we check the
// return value. We only warn about an error because failure here is not fatal to the mmap.
if unsafe { libc::madvise(addr, size, libc::MADV_DONTDUMP) } == -1 {
warn!(
"failed madvise(MADV_DONTDUMP) on mmap: {}",
errno::Error::last()
);
}
Ok(MemoryMapping {
addr: addr as *mut u8,
size,
})
}
/// Maps the first `size` bytes of the given `fd`.
/// Maps the first `size` bytes of the given `fd` as read/write.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
@@ -164,34 +160,121 @@ impl MemoryMapping {
MemoryMapping::from_fd_offset(fd, size, 0)
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd`.
pub fn from_fd_offset(fd: &dyn AsRawFd, size: usize, offset: usize) -> Result<MemoryMapping> {
MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd` as read/write.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
pub fn from_fd_offset(fd: &dyn AsRawFd, size: usize, offset: usize) -> Result<MemoryMapping> {
if offset > libc::off_t::max_value() as usize {
return Err(Error::InvalidOffset);
}
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub fn from_fd_offset_protection(
fd: &dyn AsRawFd,
size: usize,
offset: usize,
prot: Protection,
) -> Result<MemoryMapping> {
// This is safe because we are creating a mapping of the given fd in a place not already
// used by any other area in this process.
unsafe {
MemoryMapping::try_mmap(
None,
size,
libc::PROT_READ | libc::PROT_WRITE,
prot.into(),
libc::MAP_SHARED,
fd.as_raw_fd(),
offset as libc::off_t,
Some((fd, offset)),
)
}
}
/// Creates an anonymous shared mapping of `size` bytes at `addr` with `prot` protection.
/// Unsafe: unmaps any mmap'd regions already present at (addr..addr+size).
///
/// # Arguments
/// * `addr` - Memory address to mmap at.
/// * `size` - Size of memory region in bytes.
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub unsafe fn new_protection_fixed(
addr: *mut u8,
size: usize,
prot: Protection,
) -> Result<MemoryMapping> {
MemoryMapping::try_mmap(
Some(addr),
size,
prot.into(),
libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
None,
)
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd` with
/// `prot` protections.
/// Unsafe: unmaps any mmap'd regions already present at (addr..addr+size).
///
/// # Arguments
/// * `addr` - Memory address to mmap at.
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub unsafe fn from_fd_offset_protection_fixed(
addr: *mut u8,
fd: &dyn AsRawFd,
size: usize,
offset: usize,
prot: Protection,
) -> Result<MemoryMapping> {
MemoryMapping::try_mmap(
Some(addr),
size,
prot.into(),
libc::MAP_SHARED | libc::MAP_NORESERVE,
Some((fd, offset)),
)
}
/// Helper wrapper around libc::mmap that does some basic validation, and calls
/// madvise with MADV_DONTDUMP on the created mmap
unsafe fn try_mmap(
addr: Option<*mut u8>,
size: usize,
prot: c_int,
flags: c_int,
fd: Option<(&dyn AsRawFd, usize)>,
) -> Result<MemoryMapping> {
let mut flags = flags;
// If addr is provided, set the FIXED flag, and validate addr alignment
let addr = match addr {
Some(addr) => {
if (addr as usize) % pagesize() != 0 {
return Err(Error::NotPageAligned);
}
flags = flags | libc::MAP_FIXED;
addr as *mut libc::c_void
}
None => null_mut(),
};
// If fd is provided, validate fd offset is within bounds
let (fd, offset) = match fd {
Some((fd, offset)) => {
if offset > libc::off_t::max_value() as usize {
return Err(Error::InvalidOffset);
}
(fd.as_raw_fd(), offset as libc::off_t)
}
None => (-1, 0),
};
let addr = libc::mmap(addr, size, prot, flags, fd, offset);
if addr == libc::MAP_FAILED {
return Err(Error::SystemCallFailed(errno::Error::last()));
}
// This is safe because we call madvise with a valid address and size, and we check the
// return value. We only warn about an error because failure here is not fatal to the mmap.
if unsafe { libc::madvise(addr, size, libc::MADV_DONTDUMP) } == -1 {
if libc::madvise(addr, size, libc::MADV_DONTDUMP) == -1 {
warn!(
"failed madvise(MADV_DONTDUMP) on mmap: {}",
errno::Error::last()
@@ -203,7 +286,7 @@ impl MemoryMapping {
})
}
/// Returns a pointer to the begining of the memory region. Should only be
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
@@ -214,6 +297,23 @@ impl MemoryMapping {
self.size
}
/// Calls msync with MS_SYNC on the mapping.
pub fn msync(&self) -> Result<()> {
// This is safe since we use the exact address and length of a known
// good memory mapping.
let ret = unsafe {
libc::msync(
self.as_ptr() as *mut libc::c_void,
self.size(),
libc::MS_SYNC,
)
};
if ret == -1 {
return Err(Error::SystemCallFailed(errno::Error::last()));
}
Ok(())
}
/// Writes a slice to the memory region at the specified offset.
/// Returns the number of bytes written. The number of bytes written can
/// be less than the length of the slice if there isn't enough room in the
@@ -468,6 +568,236 @@ impl Drop for MemoryMapping {
}
}
/// Tracks fixed memory mappings within an anonymous, fixed-size, memory-mapped
/// arena in the current process.
pub struct MemoryMappingArena {
addr: *mut u8,
size: usize,
// When doing in-place swaps of MemoryMappings, the BTreeMap returns an owned
// instance of the old MemoryMapping. When the old MemoryMapping falls out
// of scope, it calls munmap on the same region as the new MemoryMapping that
// was just mapped in. To avoid accidentally munmapping the new MemoryMapping,
// all mappings are wrapped in ManuallyDrop and then "forgotten" when removed
// from the BTreeMap.
maps: BTreeMap<usize, ManuallyDrop<MemoryMapping>>,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMappingArena {}
unsafe impl Sync for MemoryMappingArena {}
impl MemoryMappingArena {
/// Creates an mmap arena of `size` bytes.
///
/// # Arguments
/// * `size` - Size of memory region in bytes.
pub fn new(size: usize) -> Result<MemoryMappingArena> {
// Reserve the arena's memory using an anonymous read-only mmap.
// The actual MemoryMapping object is forgotten, with
// MemoryMappingArena manually calling munmap on drop.
let mmap = MemoryMapping::new_protection(size, Protection::none().set_read())?;
let addr = mmap.as_ptr();
let size = mmap.size();
std::mem::forget(mmap);
Ok(MemoryMappingArena {
addr,
size,
maps: BTreeMap::new(),
})
}
/// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
/// `offset` must be page aligned.
///
/// # Arguments
/// * `offset` - Page aligned offset into the arena in bytes.
/// * `size` - Size of memory region in bytes.
pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
self.try_add(offset, size, Protection::read_write(), None)
}
/// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
/// the start of the arena. `offset` must be page aligned.
///
/// # Arguments
/// * `offset` - Page aligned offset into the arena in bytes.
/// * `size` - Size of memory region in bytes.
/// * `fd` - File descriptor to mmap from.
pub fn add_fd(&mut self, offset: usize, size: usize, fd: &dyn AsRawFd) -> Result<()> {
self.add_fd_offset(offset, size, fd, 0)
}
/// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
/// at `offset` bytes from the start of the arena. `offset` must be page aligned.
///
/// # Arguments
/// * `offset` - Page aligned offset into the arena in bytes.
/// * `size` - Size of memory region in bytes.
/// * `fd` - File descriptor to mmap from.
/// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
pub fn add_fd_offset(
&mut self,
offset: usize,
size: usize,
fd: &dyn AsRawFd,
fd_offset: usize,
) -> Result<()> {
self.add_fd_offset_protection(offset, size, fd, fd_offset, Protection::read_write())
}
/// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
/// at `offset` bytes from the start of the arena with `prot` protections.
/// `offset` must be page aligned.
///
/// # Arguments
/// * `offset` - Page aligned offset into the arena in bytes.
/// * `size` - Size of memory region in bytes.
/// * `fd` - File descriptor to mmap from.
/// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub fn add_fd_offset_protection(
&mut self,
offset: usize,
size: usize,
fd: &dyn AsRawFd,
fd_offset: usize,
prot: Protection,
) -> Result<()> {
self.try_add(offset, size, prot, Some((fd, fd_offset)))
}
/// Helper method that calls the appropriate MemoryMapping constructor and adds
/// the resulting map to the arena.
fn try_add(
&mut self,
offset: usize,
size: usize,
prot: Protection,
fd: Option<(&dyn AsRawFd, usize)>,
) -> Result<()> {
self.validate_range(offset, size)?;
// This is safe since the range has been validated.
let mmap = unsafe {
match fd {
Some((fd, fd_offset)) => MemoryMapping::from_fd_offset_protection_fixed(
(self.addr as usize + offset) as *mut u8,
fd,
size,
fd_offset,
prot,
)?,
None => MemoryMapping::new_protection_fixed(
(self.addr as usize + offset) as *mut u8,
size,
prot,
)?,
}
};
self.maps.insert(offset, ManuallyDrop::new(mmap));
Ok(())
}
/// Removes a mapping at `offset` from the start of the arena.
/// Returns a boolean indicating if there was a mapping present at `offset`.
/// If none was present, this method is a noop.
pub fn remove(&mut self, offset: usize) -> Result<bool> {
if let Some(mmap) = self.maps.remove(&offset) {
// Instead of munmapping the memory map, which would leave an unreserved
// hole in the arena, swap this mmap out for a fresh anonymous, read-only mapping.
// This is safe since the memory mapping perfectly overlaps with an
// existing, known good memory mapping.
let mmap = unsafe {
MemoryMapping::new_protection_fixed(
mmap.as_ptr(),
mmap.size(),
Protection::none().set_read(),
)?
};
self.maps.insert(offset, ManuallyDrop::new(mmap));
Ok(true)
} else {
Ok(false)
}
}
/// Calls msync with MS_SYNC on the mapping at `offset` from the start of
/// the arena.
/// Returns a boolean indicating if there was a mapping present at `offset`.
/// If none was present, this method is a noop.
pub fn msync(&self, offset: usize) -> Result<bool> {
if let Some(mmap) = self.maps.get(&offset) {
mmap.msync()?;
Ok(true)
} else {
Ok(false)
}
}
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
/// Returns the size of the memory region in bytes.
pub fn size(&self) -> usize {
self.size
}
/// Validates `offset` and `size`.
/// Checks that offset..offset+size doesn't overlap with existing mappings.
/// Also ensures correct alignment, and checks for any overflow.
/// Note: offset..offset+size is considered valid if it _perfectly_ overlaps
/// with a single other region.
fn validate_range(&self, offset: usize, size: usize) -> Result<()> {
// Ensure offset is page-aligned
if offset % pagesize() != 0 {
return Err(Error::NotPageAligned);
}
// Ensure offset + size doesn't overflow
let end_offset = offset.checked_add(size).ok_or(Error::InvalidAddress)?;
// Ensure offset + size are within the arena bounds
if end_offset > self.size {
return Err(Error::InvalidAddress);
}
// Ensure offset..offset+size doesn't overlap with existing regions
// Find the offset + size of the mapping at or before the desired offset
if let Some((&prev_offset, prev_mmap)) = self.maps.range(..=offset).rev().next() {
if offset == prev_offset {
// Perfectly overlapping regions are allowed
if size != prev_mmap.size() {
return Err(Error::Overlapping(offset, size));
}
} else if offset < prev_offset + prev_mmap.size() {
return Err(Error::Overlapping(offset, size));
}
}
// Also ensure the new region doesn't run into the next mapping
if let Some((&next_offset, _)) = self.maps.range((offset + 1)..).next() {
if end_offset > next_offset {
return Err(Error::Overlapping(offset, size));
}
}
Ok(())
}
}
impl Drop for MemoryMappingArena {
fn drop(&mut self) {
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
libc::munmap(self.addr as *mut libc::c_void, self.size);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -560,4 +890,59 @@ mod tests {
e => panic!("unexpected error: {}", e),
}
}
#[test]
fn arena_new() {
let m = MemoryMappingArena::new(0x40000).unwrap();
assert_eq!(m.size(), 0x40000);
}
#[test]
fn arena_add() {
let mut m = MemoryMappingArena::new(0x40000).unwrap();
assert!(m.add_anon(0, pagesize() * 4).is_ok());
}
#[test]
fn arena_remove() {
let mut m = MemoryMappingArena::new(0x40000).unwrap();
assert!(m.add_anon(0, pagesize() * 4).is_ok());
assert_eq!(m.remove(0).unwrap(), true);
// `remove` swaps in an anonymous placeholder mapping rather than leaving a
// hole, so a second call still reports a mapping at this offset.
assert_eq!(m.remove(0).unwrap(), true);
}
#[test]
fn arena_add_overlap_error() {
let page = pagesize();
let mut m = MemoryMappingArena::new(page * 4).unwrap();
assert!(m.add_anon(0, page * 4).is_ok());
let res = m.add_anon(page, page).unwrap_err();
match res {
Error::Overlapping(a, o) => {
assert_eq!((a, o), (page, page));
}
e => panic!("unexpected error: {}", e),
}
}
#[test]
fn arena_add_alignment_error() {
let mut m = MemoryMappingArena::new(pagesize() * 2).unwrap();
assert!(m.add_anon(0, 0x100).is_ok());
let res = m.add_anon(pagesize() + 1, 0x100).unwrap_err();
match res {
Error::NotPageAligned => {}
e => panic!("unexpected error: {}", e),
}
}
#[test]
fn arena_add_oob_error() {
let mut m = MemoryMappingArena::new(pagesize()).unwrap();
let res = m.add_anon(0, pagesize() + 1).unwrap_err();
match res {
Error::InvalidAddress => {}
e => panic!("unexpected error: {}", e),
}
}
}
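
For completeness, here is a rough sketch of how the kvm-side additions above
might be driven. This is not code from the change itself: the guest memory
layout and sizes are illustrative, and it assumes the existing crosvm
constructors (Kvm::new, GuestMemory::new, Vm::new) and access to /dev/kvm.

    use kvm::{Kvm, Vm};
    use sys_util::{pagesize, GuestAddress, GuestMemory, MemoryMappingArena};

    fn arena_in_vm_example() {
        let kvm = Kvm::new().unwrap();
        // 64 MiB of ordinary guest RAM starting at guest physical address 0.
        let guest_mem = GuestMemory::new(&[(GuestAddress(0), 0x400_0000)]).unwrap();
        let mut vm = Vm::new(&kvm, guest_mem).unwrap();

        // Build an arena and give it some initial contents.
        let mut arena = MemoryMappingArena::new(64 * pagesize()).unwrap();
        arena.add_anon(0, 2 * pagesize()).unwrap();

        // Place the arena just past the end of guest RAM so it cannot overlap
        // an existing slot. The whole arena consumes exactly one KVM slot.
        let slot = vm
            .add_mmap_arena(GuestAddress(0x400_0000), arena, false, false)
            .unwrap();

        // Mappings inside the arena can keep changing without touching KVM.
        vm.get_mmap_arena(slot)
            .unwrap()
            .add_anon(4 * pagesize(), pagesize())
            .unwrap();

        // Hand the arena back when done; its slot returns to the free pool.
        let _arena = vm.remove_mmap_arena(slot).unwrap();
    }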