Reland "vm_control: reduce user memory region modifications"

This is a reland of commit 22c212d54f

This reland avoids using the new pre-mapped memory regions on arm
devices, since there is insufficient address space. The new path is
still used on aarch64.

Original change's description:
> vm_control: reduce user memory region modifications
>
> Reduce how often KVM_SET_USER_MEMORY_REGION is called when the tdp mmu
> is enabled. With the tdp mmu, there is no memory overhead from creating
> large memory regions (at least until a nested VM is started). Simply
> mmap'ing/munmap'ing fds within a pre-created memory region is more
> efficient. It also addresses audio jank caused by removing a memory
> region.
>
> Adding this support to VmMemoryRequest will allow FsMappingRequest to be
> removed in a later change.
>
> BUG=b:274037632
> TEST=tast run arc.Boot.vm
> TEST=manually launch gedit in crostini
>
> Change-Id: I2ac02454ecb734c9707b6d67546135134b887527
> Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4402068
> Reviewed-by: Dennis Kempin <denniskempin@google.com>
> Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
> Commit-Queue: David Stevens <stevensd@chromium.org>

Bug: b:274037632
Change-Id: I5deedfd3a030640f9af950cee675fac0d9a411a0
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4421352
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Commit-Queue: Dennis Kempin <denniskempin@google.com>
Reviewed-by: Dennis Kempin <denniskempin@google.com>
David Stevens, 2023-04-06 19:30:38 +09:00 (committed by crosvm LUCI)
parent 307b57809a
commit df2625c599
11 changed files with 263 additions and 96 deletions
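The flow this change introduces, seen from a device backend, is: prepare a shared memory region once per allocation, then register and unregister individual fd mappings inside it by VmMemoryRegionId. The sketch below is illustrative only and is not code from this change; it assumes an already-connected base::Tube to the main process, an existing PCI BAR Alloc, and a SharedMemory handle, and it simply strings together the request/response variants added in the diffs that follow.

use base::{Protection, SharedMemory, Tube};
use resources::Alloc;
use vm_control::{
    VmMemoryDestination, VmMemoryRegionId, VmMemoryRequest, VmMemoryResponse, VmMemorySource,
};

// Hypothetical helper: `tube`, `alloc`, and `shm` are assumed to exist already.
fn map_then_unmap(tube: &Tube, alloc: Alloc, shm: SharedMemory) -> anyhow::Result<()> {
    // 1. Ask the main process to pre-map the region backing `alloc`. On hosts where
    //    this is not worthwhile (no tdp mmu, 32-bit arm) it is a no-op and later
    //    requests fall back to one KVM memslot per mapping.
    tube.send(&VmMemoryRequest::PrepareSharedMemoryRegion { alloc })?;
    match tube.recv::<VmMemoryResponse>()? {
        VmMemoryResponse::Ok => (),
        other => anyhow::bail!("unexpected response {:?}", other),
    }

    // 2. Map an fd at offset 0 of that allocation and remember the returned id.
    tube.send(&VmMemoryRequest::RegisterMemory {
        source: VmMemorySource::SharedMemory(shm),
        dest: VmMemoryDestination::ExistingAllocation {
            allocation: alloc,
            offset: 0,
        },
        prot: Protection::read_write(),
    })?;
    let id: VmMemoryRegionId = match tube.recv::<VmMemoryResponse>()? {
        VmMemoryResponse::RegisterMemory(id) => id,
        other => anyhow::bail!("unexpected response {:?}", other),
    };

    // 3. Tear the mapping down by id; with a prepared region this is a cheap
    //    remove_mapping() inside the arena rather than removing a whole memslot.
    tube.send(&VmMemoryRequest::UnregisterMemory(id))?;
    match tube.recv::<VmMemoryResponse>()? {
        VmMemoryResponse::Ok => Ok(()),
        other => anyhow::bail!("unexpected response {:?}", other),
    }
}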

Cargo.lock (generated)

@@ -2499,6 +2499,7 @@ dependencies = [
"gdbstub_arch",
"hypervisor",
"libc",
"once_cell",
"remain",
"resources",
"rutabaga_gfx",


@@ -32,7 +32,6 @@ use base::RawDescriptor;
use base::Tube;
use base::WaitContext;
use base::WorkerThread;
use hypervisor::MemSlot;
use resources::AddressRange;
use resources::Alloc;
use resources::AllocOptions;
@@ -43,6 +42,7 @@ use vfio_sys::*;
use vm_control::HotPlugDeviceInfo;
use vm_control::HotPlugDeviceType;
use vm_control::VmMemoryDestination;
use vm_control::VmMemoryRegionId;
use vm_control::VmMemoryRequest;
use vm_control::VmMemoryResponse;
use vm_control::VmMemorySource;
@@ -668,7 +668,7 @@ pub struct VfioPciDevice {
#[cfg(feature = "direct")]
i2c_devs: HashMap<u16, PathBuf>,
vcfg_shm_mmap: Option<MemoryMapping>,
mapped_mmio_bars: BTreeMap<PciBarIndex, (u64, Vec<MemSlot>)>,
mapped_mmio_bars: BTreeMap<PciBarIndex, (u64, Vec<VmMemoryRegionId>)>,
activated: bool,
}
@@ -1183,8 +1183,8 @@ impl VfioPciDevice {
}
}
fn add_bar_mmap(&self, index: u32, bar_addr: u64) -> Vec<MemSlot> {
let mut mmaps_slots: Vec<MemSlot> = Vec::new();
fn add_bar_mmap(&self, index: u32, bar_addr: u64) -> Vec<VmMemoryRegionId> {
let mut mmaps_ids: Vec<VmMemoryRegionId> = Vec::new();
if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP != 0 {
// The BAR that stores the MSI-X table and PBA can't be mmap'd;
// such BARs must be trapped so that MSI-X can be emulated.
@@ -1198,7 +1198,7 @@ impl VfioPciDevice {
mmaps = self.remove_bar_mmap_lpss(index, mmaps);
}
if mmaps.is_empty() {
return mmaps_slots;
return mmaps_ids;
}
for mmap in mmaps.iter() {
@@ -1232,22 +1232,22 @@ impl VfioPciDevice {
Err(_) => break,
};
match response {
VmMemoryResponse::RegisterMemory { pfn: _, slot } => {
mmaps_slots.push(slot);
VmMemoryResponse::RegisterMemory(id) => {
mmaps_ids.push(id);
}
_ => break,
}
}
}
mmaps_slots
mmaps_ids
}
fn remove_bar_mmap(&self, mmap_slots: &[MemSlot]) {
for mmap_slot in mmap_slots {
fn remove_bar_mmap(&self, mmap_ids: &[VmMemoryRegionId]) {
for mmap_id in mmap_ids {
if self
.vm_socket_mem
.send(&VmMemoryRequest::UnregisterMemory(*mmap_slot))
.send(&VmMemoryRequest::UnregisterMemory(*mmap_id))
.is_err()
{
error!("failed to send UnregisterMemory request");
@@ -1260,8 +1260,8 @@ impl VfioPciDevice {
}
fn disable_bars_mmap(&mut self) {
for (_, (_, mmap_slots)) in self.mapped_mmio_bars.iter() {
self.remove_bar_mmap(mmap_slots);
for (_, (_, mmap_ids)) in self.mapped_mmio_bars.iter() {
self.remove_bar_mmap(mmap_ids);
}
self.mapped_mmio_bars.clear();
}
@@ -1273,12 +1273,12 @@ impl VfioPciDevice {
let bar_idx = mmio_info.bar_index();
let addr = mmio_info.address();
if let Some((cur_addr, slots)) = self.mapped_mmio_bars.remove(&bar_idx) {
if let Some((cur_addr, ids)) = self.mapped_mmio_bars.remove(&bar_idx) {
if cur_addr == addr {
self.mapped_mmio_bars.insert(bar_idx, (cur_addr, slots));
self.mapped_mmio_bars.insert(bar_idx, (cur_addr, ids));
continue;
} else {
self.remove_bar_mmap(&slots);
self.remove_bar_mmap(&ids);
}
}
@@ -1288,8 +1288,8 @@ impl VfioPciDevice {
}
for (bar_idx, addr) in needs_map.iter() {
let slots = self.add_bar_mmap(*bar_idx as u32, *addr);
self.mapped_mmio_bars.insert(*bar_idx, (*addr, slots));
let ids = self.add_bar_mmap(*bar_idx as u32, *addr);
self.mapped_mmio_bars.insert(*bar_idx, (*addr, ids));
}
}


@@ -48,8 +48,8 @@ use libc::MSG_PEEK;
use resources::Alloc;
use sync::Mutex;
use uuid::Uuid;
use vm_control::MemSlot;
use vm_control::VmMemoryDestination;
use vm_control::VmMemoryRegionId;
use vm_control::VmMemoryRequest;
use vm_control::VmMemoryResponse;
use vm_control::VmMemorySource;
@@ -276,7 +276,7 @@ struct Worker {
slave_req_helper: SlaveReqHelper<SocketEndpoint<MasterReq>>,
// Stores memory regions that the worker has asked the main thread to register.
registered_memory: Vec<MemSlot>,
registered_memory: Vec<VmMemoryRegionId>,
// Channel for backend messages.
slave_req_fd: Option<SocketEndpoint<SlaveReq>>,
@@ -862,9 +862,9 @@ impl Worker {
match response {
VmMemoryResponse::Ok => Ok(()),
VmMemoryResponse::RegisterMemory { slot, .. } => {
VmMemoryResponse::RegisterMemory(id) => {
// Store the registered memory slot so we can unregister it when the thread ends.
self.registered_memory.push(slot);
self.registered_memory.push(id);
Ok(())
}
VmMemoryResponse::Err(e) => {
@@ -1157,8 +1157,8 @@ impl Worker {
// Clean up memory regions that the worker registered so that the device can start another
// worker later.
fn cleanup_registered_memory(&mut self) {
while let Some(slot) = self.registered_memory.pop() {
let req = VmMemoryRequest::UnregisterMemory(slot);
while let Some(id) = self.registered_memory.pop() {
let req = VmMemoryRequest::UnregisterMemory(id);
if let Err(e) = self.send_memory_request(&req) {
error!("failed to unregister memory slot: {}", e);
}


@@ -8,6 +8,7 @@ use std::sync::Arc;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use acpi_tables::sdt::SDT;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use base::error;
use base::info;
@@ -34,8 +35,8 @@ use virtio_sys::virtio_config::VIRTIO_CONFIG_S_DRIVER_OK;
use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FAILED;
use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FEATURES_OK;
use virtio_sys::virtio_config::VIRTIO_CONFIG_S_NEEDS_RESET;
use vm_control::MemSlot;
use vm_control::VmMemoryDestination;
use vm_control::VmMemoryRegionId;
use vm_control::VmMemoryRequest;
use vm_control::VmMemoryResponse;
use vm_control::VmMemorySource;
@@ -677,6 +678,8 @@ impl PciDevice for VirtioPciDevice {
.take()
.expect("missing shared_memory_tube"),
alloc,
// See the comment on VmMemoryRequest::execute.
!self.device.expose_shmem_descriptors_with_viommu(),
)));
vec![config]
@@ -982,15 +985,17 @@ impl Suspendable for VirtioPciDevice {
struct VmRequester {
tube: Tube,
alloc: Alloc,
mappings: BTreeMap<u64, MemSlot>,
mappings: BTreeMap<u64, VmMemoryRegionId>,
needs_prepare: bool,
}
impl VmRequester {
fn new(tube: Tube, alloc: Alloc) -> Self {
fn new(tube: Tube, alloc: Alloc, do_prepare: bool) -> Self {
Self {
tube,
alloc,
mappings: BTreeMap::new(),
needs_prepare: do_prepare,
}
}
}
@@ -1002,6 +1007,20 @@ impl SharedMemoryMapper for VmRequester {
offset: u64,
prot: Protection,
) -> anyhow::Result<()> {
if self.needs_prepare {
self.tube
.send(&VmMemoryRequest::PrepareSharedMemoryRegion { alloc: self.alloc })
.context("failed to send request")?;
match self
.tube
.recv()
.context("failed to recieve request response")?
{
VmMemoryResponse::Ok => (),
e => bail!("unexpected response {:?}", e),
};
self.needs_prepare = false;
}
let request = VmMemoryRequest::RegisterMemory {
source,
dest: VmMemoryDestination::ExistingAllocation {
@@ -1016,8 +1035,8 @@ impl SharedMemoryMapper for VmRequester {
.recv()
.context("failed to recieve request response")?
{
VmMemoryResponse::RegisterMemory { pfn: _, slot } => {
self.mappings.insert(offset, slot);
VmMemoryResponse::RegisterMemory(id) => {
self.mappings.insert(offset, id);
Ok(())
}
e => Err(anyhow!("unexpected response {:?}", e)),
@@ -1025,9 +1044,9 @@ impl SharedMemoryMapper for VmRequester {
}
fn remove_mapping(&mut self, offset: u64) -> anyhow::Result<()> {
let slot = self.mappings.remove(&offset).context("invalid offset")?;
let id = self.mappings.remove(&offset).context("invalid offset")?;
self.tube
.send(&VmMemoryRequest::UnregisterMemory(slot))
.send(&VmMemoryRequest::UnregisterMemory(id))
.context("failed to send request")?;
match self
.tube


@@ -2892,6 +2892,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
#[cfg(feature = "balloon")]
let mut balloon_wss_id: u64 = 0;
let mut registered_evt_tubes: HashMap<RegisteredEvent, HashSet<AddressedTube>> = HashMap::new();
let mut region_state = VmMemoryRegionState::new();
'wait: loop {
let events = {
@@ -3254,6 +3255,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
} else {
None
},
&mut region_state,
);
if let Err(e) = tube.send(&response) {
error!("failed to send VmMemoryControlResponse: {}", e);


@@ -184,6 +184,7 @@ use tube_transporter::TubeToken;
use tube_transporter::TubeTransporterReader;
use vm_control::BalloonControlCommand;
use vm_control::DeviceControlCommand;
use vm_control::VmMemoryRegionState;
use vm_control::VmMemoryRequest;
use vm_control::VmRunMode;
use vm_memory::GuestAddress;
@@ -781,6 +782,7 @@ fn handle_readable_event<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
vcpu_boxes: &Mutex<Vec<Box<dyn VcpuArch>>>,
pvclock_host_tube: &Option<Tube>,
run_mode_arc: &VcpuRunMode,
region_state: &mut VmMemoryRegionState,
) -> Result<(bool, Option<ExitState>)> {
match event.token {
Token::VmEvent => match vm_evt_rdtube.recv::<VmEventType>() {
@@ -829,6 +831,7 @@ fn handle_readable_event<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
&mut sys_allocator_mutex.lock(),
gralloc,
None,
region_state,
);
if let Err(e) = tube.send(&response) {
error!("failed to send VmMemoryControlResponse: {}", e);
@@ -1043,6 +1046,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
}
let mut exit_state = ExitState::Stop;
let mut region_state = VmMemoryRegionState::new();
'poll: loop {
let events = {
@@ -1076,6 +1080,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
vcpu_boxes.as_ref(),
&pvclock_host_tube,
run_mode_arc.as_ref(),
&mut region_state,
)?;
if let Some(state) = state {
exit_state = state;


@@ -19,6 +19,7 @@ gdbstub = { version = "0.6.3", optional = true }
gdbstub_arch = { version = "0.2.4", optional = true }
hypervisor = { path = "../hypervisor" }
libc = "*"
once_cell = "1.7.2"
remain = "*"
resources = { path = "../resources" }
rutabaga_gfx = { path = "../rutabaga_gfx"}


@@ -25,7 +25,9 @@ pub mod client;
pub mod display;
pub mod sys;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::convert::TryInto;
use std::fmt;
use std::fmt::Display;
@@ -50,6 +52,7 @@ use base::info;
use base::warn;
use base::with_as_descriptor;
use base::AsRawDescriptor;
use base::Descriptor;
use base::Error as SysError;
use base::Event;
use base::ExternalMapping;
@@ -481,6 +484,9 @@ pub struct IoEventUpdateRequest {
#[derive(Serialize, Deserialize)]
pub enum VmMemoryRequest {
/// Prepare a shared memory region to make later operations more efficient. This
/// may be a no-op depending on underlying platform support.
PrepareSharedMemoryRegion { alloc: Alloc },
RegisterMemory {
/// Source of the memory to register (mapped file descriptor, shared memory region, etc.)
source: VmMemorySource,
@@ -500,7 +506,7 @@ pub enum VmMemoryRequest {
size: u64,
},
/// Unregister the given memory slot that was previously registered with `RegisterMemory`.
UnregisterMemory(MemSlot),
UnregisterMemory(VmMemoryRegionId),
/// Register an ioeventfd by looking up using Alloc info.
IoEventWithAlloc {
evt: Event,
@@ -529,6 +535,69 @@ impl<'a> VmMemoryRequestIommuClient<'a> {
}
}
pub struct VmMemoryRegionState {
// alloc -> (pfn, slot)
slot_map: HashMap<Alloc, (u64, MemSlot)>,
// id -> (slot, Option<offset, size>)
mapped_regions: BTreeMap<VmMemoryRegionId, (MemSlot, Option<(usize, usize)>)>,
}
impl VmMemoryRegionState {
pub fn new() -> VmMemoryRegionState {
Self {
slot_map: HashMap::new(),
mapped_regions: BTreeMap::new(),
}
}
}
fn handle_prepared_region(
vm: &mut impl Vm,
region_state: &mut VmMemoryRegionState,
source: &VmMemorySource,
dest: &VmMemoryDestination,
prot: &Protection,
) -> Option<VmMemoryResponse> {
let VmMemoryDestination::ExistingAllocation { allocation, offset } = dest else {
return None;
};
let (pfn, slot) = region_state.slot_map.get(allocation)?;
let (descriptor, file_offset, size) = match source {
VmMemorySource::Descriptor {
descriptor,
offset,
size,
} => (
Descriptor(descriptor.as_raw_descriptor()),
*offset,
*size as usize,
),
VmMemorySource::SharedMemory(shm) => {
let size = shm.size() as usize;
(Descriptor(shm.as_raw_descriptor()), 0, size)
}
_ => return Some(VmMemoryResponse::Err(SysError::new(EINVAL))),
};
if let Err(err) = vm.add_fd_mapping(
*slot,
*offset as usize,
size,
&descriptor,
file_offset,
*prot,
) {
return Some(VmMemoryResponse::Err(err));
}
let pfn = pfn + (offset >> 12);
region_state.mapped_regions.insert(
VmMemoryRegionId(pfn),
(*slot, Some((*offset as usize, size))),
);
Some(VmMemoryResponse::RegisterMemory(VmMemoryRegionId(pfn)))
}
impl VmMemoryRequest {
/// Executes this request on the given Vm.
///
@@ -545,10 +614,34 @@ impl VmMemoryRequest {
sys_allocator: &mut SystemAllocator,
gralloc: &mut RutabagaGralloc,
iommu_client: Option<&mut VmMemoryRequestIommuClient>,
region_state: &mut VmMemoryRegionState,
) -> VmMemoryResponse {
use self::VmMemoryRequest::*;
match self {
PrepareSharedMemoryRegion { alloc } => {
// Currently the iommu_client is only used by virtio-gpu, and virtio-gpu
// is incompatible with PrepareSharedMemoryRegion because we can't use
// add_fd_mapping with VmMemorySource::Vulkan.
assert!(iommu_client.is_none());
if !sys::should_prepare_memory_region() {
return VmMemoryResponse::Ok;
}
match sys::prepare_shared_memory_region(vm, sys_allocator, alloc) {
Ok(info) => {
region_state.slot_map.insert(alloc, info);
VmMemoryResponse::Ok
}
Err(e) => VmMemoryResponse::Err(e),
}
}
RegisterMemory { source, dest, prot } => {
if let Some(resp) = handle_prepared_region(vm, region_state, &source, &dest, &prot)
{
return resp;
}
// Correct on Windows because callers of this IPC guarantee descriptor is a mapping
// handle.
let (mapped_region, size, descriptor) = match source.map(gralloc, prot) {
@@ -594,33 +687,43 @@ impl VmMemoryRequest {
}
let pfn = guest_addr.0 >> 12;
VmMemoryResponse::RegisterMemory { pfn, slot }
region_state
.mapped_regions
.insert(VmMemoryRegionId(pfn), (slot, None));
VmMemoryResponse::RegisterMemory(VmMemoryRegionId(pfn))
}
UnregisterMemory(slot) => match vm.remove_memory_region(slot) {
Ok(_) => {
if let Some(iommu_client) = iommu_client {
if iommu_client.gpu_memory.remove(&slot) {
let request = VirtioIOMMURequest::VfioCommand(
VirtioIOMMUVfioCommand::VfioDmabufUnmap(slot),
);
UnregisterMemory(id) => match region_state.mapped_regions.remove(&id) {
Some((slot, None)) => match vm.remove_memory_region(slot) {
Ok(_) => {
if let Some(iommu_client) = iommu_client {
if iommu_client.gpu_memory.remove(&slot) {
let request = VirtioIOMMURequest::VfioCommand(
VirtioIOMMUVfioCommand::VfioDmabufUnmap(slot),
);
match virtio_iommu_request(iommu_client.tube, &request) {
Ok(VirtioIOMMUResponse::VfioResponse(
VirtioIOMMUVfioResult::Ok,
)) => VmMemoryResponse::Ok,
resp => {
error!("Unexpected message response: {:?}", resp);
VmMemoryResponse::Err(SysError::new(EINVAL))
match virtio_iommu_request(iommu_client.tube, &request) {
Ok(VirtioIOMMUResponse::VfioResponse(
VirtioIOMMUVfioResult::Ok,
)) => VmMemoryResponse::Ok,
resp => {
error!("Unexpected message response: {:?}", resp);
VmMemoryResponse::Err(SysError::new(EINVAL))
}
}
} else {
VmMemoryResponse::Ok
}
} else {
VmMemoryResponse::Ok
}
} else {
VmMemoryResponse::Ok
}
}
Err(e) => VmMemoryResponse::Err(e),
Err(e) => VmMemoryResponse::Err(e),
},
Some((slot, Some((offset, size)))) => match vm.remove_mapping(slot, offset, size) {
Ok(()) => VmMemoryResponse::Ok,
Err(e) => VmMemoryResponse::Err(e),
},
None => VmMemoryResponse::Err(SysError::new(EINVAL)),
},
DynamicallyFreeMemoryRange {
guest_address,
@@ -693,14 +796,15 @@ impl VmMemoryRequest {
}
}
#[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Ord, Clone, Copy)]
/// Identifier for registered memory regions. Globally unique.
// The current implementation uses pfn as the unique identifier.
pub struct VmMemoryRegionId(u64);
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryResponse {
/// The request to register memory into guest address space was successfully done at page frame
/// number `pfn` and memory slot number `slot`.
RegisterMemory {
pfn: u64,
slot: MemSlot,
},
/// The request to register memory into guest address space was successful.
RegisterMemory(VmMemoryRegionId),
Ok,
Err(SysError),
}
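Because the id is just the page frame number of the mapping's guest address, the two registration paths above (a standalone memslot vs. an fd mapped into a prepared region) hand out consistent identifiers. A small worked example with hypothetical addresses:

// Hypothetical values; pages are 4 KiB, hence the `>> 12` used in the code above.
fn main() {
    let region_base_gpa: u64 = 0x1_0000_0000; // guest address of the prepared PCI BAR region
    let mapping_offset: u64 = 0x20_0000; // offset of an fd mapped inside that region

    // Id produced by handle_prepared_region(): base pfn plus the offset in pages.
    let id_in_region = (region_base_gpa >> 12) + (mapping_offset >> 12);
    // Id a standalone RegisterMemory at the same guest address would produce.
    let id_standalone = (region_base_gpa + mapping_offset) >> 12;

    assert_eq!(id_in_region, id_standalone); // both are pfn 0x10_0200
}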


@@ -19,4 +19,4 @@ cfg_if::cfg_if! {
}
}
pub use platform::handle_request;
pub use platform::{handle_request, prepare_shared_memory_region, should_prepare_memory_region};


@@ -21,6 +21,7 @@ use hypervisor::MemSlot;
use hypervisor::Vm;
use libc::EINVAL;
use libc::ERANGE;
use once_cell::sync::Lazy;
use resources::Alloc;
use resources::SystemAllocator;
use serde::Deserialize;
@@ -134,47 +135,65 @@ pub enum FsMappingRequest {
},
}
pub fn prepare_shared_memory_region(
vm: &mut dyn Vm,
allocator: &mut SystemAllocator,
alloc: Alloc,
) -> Result<(u64, MemSlot), SysError> {
if !matches!(alloc, Alloc::PciBar { .. }) {
return Err(SysError::new(EINVAL));
}
match allocator.mmio_allocator_any().get(&alloc) {
Some((range, _)) => {
let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
Some(v) => v,
None => return Err(SysError::new(ERANGE)),
};
let arena = match MemoryMappingArena::new(size) {
Ok(a) => a,
Err(MmapError::SystemCallFailed(e)) => return Err(e),
_ => return Err(SysError::new(EINVAL)),
};
match vm.add_memory_region(GuestAddress(range.start), Box::new(arena), false, false) {
Ok(slot) => Ok((range.start >> 12, slot)),
Err(e) => Err(e),
}
}
None => Err(SysError::new(EINVAL)),
}
}
static SHOULD_PREPARE_MEMORY_REGION: Lazy<bool> = Lazy::new(|| {
if cfg!(target_arch = "x86_64") {
// The legacy x86 MMU allocates an rmap and a page tracking array
// that take 2.5MiB per 1GiB of user memory region address space,
// so avoid mapping the whole shared memory region if we're not
// using the tdp mmu.
match std::fs::read("/sys/module/kvm/parameters/tdp_mmu") {
Ok(bytes) if bytes.len() > 0 => bytes[0] == b'Y',
_ => false,
}
} else if cfg!(target_pointer_width = "64") {
true
} else {
// Not enough address space on 32-bit systems
false
}
});
pub fn should_prepare_memory_region() -> bool {
*SHOULD_PREPARE_MEMORY_REGION
}
impl FsMappingRequest {
pub fn execute(&self, vm: &mut dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
use self::FsMappingRequest::*;
match *self {
AllocateSharedMemoryRegion(Alloc::PciBar {
bus,
dev,
func,
bar,
}) => {
match allocator.mmio_allocator_any().get(&Alloc::PciBar {
bus,
dev,
func,
bar,
}) {
Some((range, _)) => {
let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
Some(v) => v,
None => return VmResponse::Err(SysError::new(ERANGE)),
};
let arena = match MemoryMappingArena::new(size) {
Ok(a) => a,
Err(MmapError::SystemCallFailed(e)) => return VmResponse::Err(e),
_ => return VmResponse::Err(SysError::new(EINVAL)),
};
match vm.add_memory_region(
GuestAddress(range.start),
Box::new(arena),
false,
false,
) {
Ok(slot) => VmResponse::RegisterMemory {
pfn: range.start >> 12,
slot,
},
Err(e) => VmResponse::Err(e),
}
}
None => VmResponse::Err(SysError::new(EINVAL)),
AllocateSharedMemoryRegion(alloc) => {
match prepare_shared_memory_region(vm, allocator, alloc) {
Ok((pfn, slot)) => VmResponse::RegisterMemory { pfn, slot },
Err(e) => VmResponse::Err(e),
}
}
CreateMemoryMapping {
@@ -198,7 +217,6 @@ impl FsMappingRequest {
Err(e) => VmResponse::Err(e),
}
}
_ => VmResponse::Err(SysError::new(EINVAL)),
}
}
}
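To put numbers on the rmap/page-tracking comment above: the legacy x86 MMU pays roughly 2.5 MiB of bookkeeping per GiB of user memory region address space as soon as the region is created, whether or not anything is ever mapped into it. A hypothetical 16 GiB shared-memory BAR illustrates why the prepared-region path is gated on the tdp mmu:

// Back-of-the-envelope cost of pre-mapping a region under the legacy x86 MMU.
fn legacy_mmu_overhead_mib(region_gib: u64) -> f64 {
    region_gib as f64 * 2.5 // ~2.5 MiB of rmap/page-tracking per GiB
}

fn main() {
    // A hypothetical 16 GiB shared-memory BAR would cost ~40 MiB up front,
    // an allocation the tdp mmu avoids making eagerly.
    assert_eq!(legacy_mmu_overhead_mib(16), 40.0);
}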


@@ -10,8 +10,13 @@ use std::mem::size_of;
use std::path::Path;
use base::named_pipes::OverlappedWrapper;
use base::Error;
use base::Event;
use base::PipeConnection;
use hypervisor::MemSlot;
use hypervisor::Vm;
use resources::Alloc;
use resources::SystemAllocator;
use crate::client::HandleRequestResult;
use crate::VmRequest;
@@ -61,3 +66,15 @@ pub fn recv_service_message(
exit_event,
)
}
pub fn should_prepare_memory_region() -> bool {
false
}
pub fn prepare_shared_memory_region(
_vm: &mut dyn Vm,
_allocator: &mut SystemAllocator,
_alloc: Alloc,
) -> std::result::Result<(u64, MemSlot), Error> {
unimplemented!()
}