From df2625c5992aa88005e42d6ee4d6acd7d01ec049 Mon Sep 17 00:00:00 2001
From: David Stevens
Date: Thu, 6 Apr 2023 19:30:38 +0900
Subject: [PATCH] Reland "vm_control: reduce user memory region modifications"

This is a reland of commit 22c212d54f1fc7c9854084ffaa697b7890a14b0a

This reland avoids using the new pre-mapped memory regions on arm
devices, since there is insufficient address space. The new path is
still used on aarch64.

Original change's description:
> vm_control: reduce user memory region modifications
>
> Reduce how often KVM_SET_USER_MEMORY_REGION is called when the tdp mmu
> is enabled. With the tdp mmu, there is no memory overhead from creating
> large memory regions (at least until a nested VM is started). Simply
> mmap'ing/munmap'ing fds within a pre-created memory region is more
> efficient. It also addresses audio jank caused by removing a memory
> region.
>
> Adding this support to VmMemoryRequest will allow FsMappingRequest to be
> removed in a later change.
>
> BUG=b:274037632
> TEST=tast run arc.Boot.vm
> TEST=manually launch gedit in crostini
>
> Change-Id: I2ac02454ecb734c9707b6d67546135134b887527
> Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4402068
> Reviewed-by: Dennis Kempin
> Reviewed-by: Daniel Verkamp
> Commit-Queue: David Stevens

Bug: b:274037632
Change-Id: I5deedfd3a030640f9af950cee675fac0d9a411a0
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4421352
Reviewed-by: Daniel Verkamp
Commit-Queue: Dennis Kempin
Reviewed-by: Dennis Kempin
---
 Cargo.lock                              |   1 +
 devices/src/pci/vfio_pci.rs             |  36 +++---
 devices/src/virtio/vhost/user/proxy.rs  |  12 +-
 devices/src/virtio/virtio_pci_device.rs |  33 +++--
 src/crosvm/sys/unix.rs                  |   2 +
 src/sys/windows.rs                      |   5 +
 vm_control/Cargo.toml                   |   1 +
 vm_control/src/lib.rs                   | 156 ++++++++++++++++++++----
 vm_control/src/sys.rs                   |   2 +-
 vm_control/src/sys/unix.rs              |  94 ++++++++------
 vm_control/src/sys/windows.rs           |  17 +++
 11 files changed, 263 insertions(+), 96 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 718d60575c..8240b7f3d0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2499,6 +2499,7 @@ dependencies = [
  "gdbstub_arch",
  "hypervisor",
  "libc",
+ "once_cell",
  "remain",
  "resources",
  "rutabaga_gfx",
diff --git a/devices/src/pci/vfio_pci.rs b/devices/src/pci/vfio_pci.rs
index e69824ad4a..7bd3acb615 100644
--- a/devices/src/pci/vfio_pci.rs
+++ b/devices/src/pci/vfio_pci.rs
@@ -32,7 +32,6 @@ use base::RawDescriptor;
 use base::Tube;
 use base::WaitContext;
 use base::WorkerThread;
-use hypervisor::MemSlot;
 use resources::AddressRange;
 use resources::Alloc;
 use resources::AllocOptions;
@@ -43,6 +42,7 @@ use vfio_sys::*;
 use vm_control::HotPlugDeviceInfo;
 use vm_control::HotPlugDeviceType;
 use vm_control::VmMemoryDestination;
+use vm_control::VmMemoryRegionId;
 use vm_control::VmMemoryRequest;
 use vm_control::VmMemoryResponse;
 use vm_control::VmMemorySource;
@@ -668,7 +668,7 @@ pub struct VfioPciDevice {
     #[cfg(feature = "direct")]
     i2c_devs: HashMap<u16, PathBuf>,
     vcfg_shm_mmap: Option<MemoryMapping>,
-    mapped_mmio_bars: BTreeMap<PciBarIndex, (u64, Vec<MemSlot>)>,
+    mapped_mmio_bars: BTreeMap<PciBarIndex, (u64, Vec<VmMemoryRegionId>)>,
     activated: bool,
 }
@@ -1183,8 +1183,8 @@ impl VfioPciDevice {
         }
     }

-    fn add_bar_mmap(&self, index: u32, bar_addr: u64) -> Vec<MemSlot> {
-        let mut mmaps_slots: Vec<MemSlot> = Vec::new();
+    fn add_bar_mmap(&self, index: u32, bar_addr: u64) -> Vec<VmMemoryRegionId> {
+        let mut mmaps_ids: Vec<VmMemoryRegionId> = Vec::new();
         if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP != 0 {
             // the bar storing msix table and pba couldn't mmap.
             // these bars should be trapped, so that msix could be emulated.
@@ -1198,7 +1198,7 @@ impl VfioPciDevice {
                 mmaps = self.remove_bar_mmap_lpss(index, mmaps);
             }
             if mmaps.is_empty() {
-                return mmaps_slots;
+                return mmaps_ids;
             }

             for mmap in mmaps.iter() {
@@ -1232,22 +1232,22 @@ impl VfioPciDevice {
                     Err(_) => break,
                 };
                 match response {
-                    VmMemoryResponse::RegisterMemory { pfn: _, slot } => {
-                        mmaps_slots.push(slot);
+                    VmMemoryResponse::RegisterMemory(id) => {
+                        mmaps_ids.push(id);
                     }
                     _ => break,
                 }
             }
         }

-        mmaps_slots
+        mmaps_ids
     }

-    fn remove_bar_mmap(&self, mmap_slots: &[MemSlot]) {
-        for mmap_slot in mmap_slots {
+    fn remove_bar_mmap(&self, mmap_ids: &[VmMemoryRegionId]) {
+        for mmap_id in mmap_ids {
             if self
                 .vm_socket_mem
-                .send(&VmMemoryRequest::UnregisterMemory(*mmap_slot))
+                .send(&VmMemoryRequest::UnregisterMemory(*mmap_id))
                 .is_err()
             {
                 error!("failed to send UnregisterMemory request");
@@ -1260,8 +1260,8 @@ impl VfioPciDevice {
     }

     fn disable_bars_mmap(&mut self) {
-        for (_, (_, mmap_slots)) in self.mapped_mmio_bars.iter() {
-            self.remove_bar_mmap(mmap_slots);
+        for (_, (_, mmap_ids)) in self.mapped_mmio_bars.iter() {
+            self.remove_bar_mmap(mmap_ids);
         }
         self.mapped_mmio_bars.clear();
     }
@@ -1273,12 +1273,12 @@ impl VfioPciDevice {
             let bar_idx = mmio_info.bar_index();
             let addr = mmio_info.address();

-            if let Some((cur_addr, slots)) = self.mapped_mmio_bars.remove(&bar_idx) {
+            if let Some((cur_addr, ids)) = self.mapped_mmio_bars.remove(&bar_idx) {
                 if cur_addr == addr {
-                    self.mapped_mmio_bars.insert(bar_idx, (cur_addr, slots));
+                    self.mapped_mmio_bars.insert(bar_idx, (cur_addr, ids));
                     continue;
                 } else {
-                    self.remove_bar_mmap(&slots);
+                    self.remove_bar_mmap(&ids);
                 }
             }

@@ -1288,8 +1288,8 @@ impl VfioPciDevice {
         }

         for (bar_idx, addr) in needs_map.iter() {
-            let slots = self.add_bar_mmap(*bar_idx as u32, *addr);
-            self.mapped_mmio_bars.insert(*bar_idx, (*addr, slots));
+            let ids = self.add_bar_mmap(*bar_idx as u32, *addr);
+            self.mapped_mmio_bars.insert(*bar_idx, (*addr, ids));
         }
     }
diff --git a/devices/src/virtio/vhost/user/proxy.rs b/devices/src/virtio/vhost/user/proxy.rs
index 604b9543a1..4ff445e6c4 100644
--- a/devices/src/virtio/vhost/user/proxy.rs
+++ b/devices/src/virtio/vhost/user/proxy.rs
@@ -48,8 +48,8 @@ use libc::MSG_PEEK;
 use resources::Alloc;
 use sync::Mutex;
 use uuid::Uuid;
-use vm_control::MemSlot;
 use vm_control::VmMemoryDestination;
+use vm_control::VmMemoryRegionId;
 use vm_control::VmMemoryRequest;
 use vm_control::VmMemoryResponse;
 use vm_control::VmMemorySource;
@@ -276,7 +276,7 @@ struct Worker {
     slave_req_helper: SlaveReqHelper<SocketEndpoint<MasterReq>>,

     // Stores memory regions that the worker has asked the main thread to register.
-    registered_memory: Vec<MemSlot>,
+    registered_memory: Vec<VmMemoryRegionId>,

     // Channel for backend mesages.
     slave_req_fd: Option<SocketEndpoint<SlaveReq>>,
@@ -862,9 +862,9 @@ impl Worker {

         match response {
             VmMemoryResponse::Ok => Ok(()),
-            VmMemoryResponse::RegisterMemory { slot, .. } => {
+            VmMemoryResponse::RegisterMemory(id) => {
                 // Store the registered memory slot so we can unregister it when the thread ends.
-                self.registered_memory.push(slot);
+                self.registered_memory.push(id);
                 Ok(())
             }
             VmMemoryResponse::Err(e) => {
@@ -1157,8 +1157,8 @@ impl Worker {

     // Clean up memory regions that the worker registered so that the device can start another
     // worker later.
     fn cleanup_registered_memory(&mut self) {
-        while let Some(slot) = self.registered_memory.pop() {
-            let req = VmMemoryRequest::UnregisterMemory(slot);
+        while let Some(id) = self.registered_memory.pop() {
+            let req = VmMemoryRequest::UnregisterMemory(id);
             if let Err(e) = self.send_memory_request(&req) {
                 error!("failed to unregister memory slot: {}", e);
             }
diff --git a/devices/src/virtio/virtio_pci_device.rs b/devices/src/virtio/virtio_pci_device.rs
index cd4eafbf5e..5fe77c6169 100644
--- a/devices/src/virtio/virtio_pci_device.rs
+++ b/devices/src/virtio/virtio_pci_device.rs
@@ -8,6 +8,7 @@ use std::sync::Arc;
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 use acpi_tables::sdt::SDT;
 use anyhow::anyhow;
+use anyhow::bail;
 use anyhow::Context;
 use base::error;
 use base::info;
@@ -34,8 +35,8 @@ use virtio_sys::virtio_config::VIRTIO_CONFIG_S_DRIVER_OK;
 use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FAILED;
 use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FEATURES_OK;
 use virtio_sys::virtio_config::VIRTIO_CONFIG_S_NEEDS_RESET;
-use vm_control::MemSlot;
 use vm_control::VmMemoryDestination;
+use vm_control::VmMemoryRegionId;
 use vm_control::VmMemoryRequest;
 use vm_control::VmMemoryResponse;
 use vm_control::VmMemorySource;
@@ -677,6 +678,8 @@ impl PciDevice for VirtioPciDevice {
                     .take()
                     .expect("missing shared_memory_tube"),
                 alloc,
+                // See comment in VmMemoryRequest::execute
+                !self.device.expose_shmem_descriptors_with_viommu(),
             )));

             vec![config]
@@ -982,15 +985,17 @@ impl Suspendable for VirtioPciDevice {
 struct VmRequester {
     tube: Tube,
     alloc: Alloc,
-    mappings: BTreeMap<u64, MemSlot>,
+    mappings: BTreeMap<u64, VmMemoryRegionId>,
+    needs_prepare: bool,
 }

 impl VmRequester {
-    fn new(tube: Tube, alloc: Alloc) -> Self {
+    fn new(tube: Tube, alloc: Alloc, do_prepare: bool) -> Self {
         Self {
             tube,
             alloc,
             mappings: BTreeMap::new(),
+            needs_prepare: do_prepare,
         }
     }
 }
@@ -1002,6 +1007,20 @@ impl SharedMemoryMapper for VmRequester {
         offset: u64,
         prot: Protection,
     ) -> anyhow::Result<()> {
+        if self.needs_prepare {
+            self.tube
+                .send(&VmMemoryRequest::PrepareSharedMemoryRegion { alloc: self.alloc })
+                .context("failed to send request")?;
+            match self
+                .tube
+                .recv()
+                .context("failed to receive request response")?
+            {
+                VmMemoryResponse::Ok => (),
+                e => bail!("unexpected response {:?}", e),
+            };
+            self.needs_prepare = false;
+        }
         let request = VmMemoryRequest::RegisterMemory {
             source,
             dest: VmMemoryDestination::ExistingAllocation {
@@ -1016,8 +1035,8 @@ impl SharedMemoryMapper for VmRequester {
             .recv()
             .context("failed to recieve request response")?
         {
-            VmMemoryResponse::RegisterMemory { pfn: _, slot } => {
-                self.mappings.insert(offset, slot);
+            VmMemoryResponse::RegisterMemory(id) => {
+                self.mappings.insert(offset, id);
                 Ok(())
             }
             e => Err(anyhow!("unexpected response {:?}", e)),
@@ -1025,9 +1044,9 @@ impl SharedMemoryMapper for VmRequester {
     }

     fn remove_mapping(&mut self, offset: u64) -> anyhow::Result<()> {
-        let slot = self.mappings.remove(&offset).context("invalid offset")?;
+        let id = self.mappings.remove(&offset).context("invalid offset")?;
         self.tube
-            .send(&VmMemoryRequest::UnregisterMemory(slot))
+            .send(&VmMemoryRequest::UnregisterMemory(id))
             .context("failed to send request")?;
         match self
             .tube
diff --git a/src/crosvm/sys/unix.rs b/src/crosvm/sys/unix.rs
index 1d73bab6ca..61884e1465 100644
--- a/src/crosvm/sys/unix.rs
+++ b/src/crosvm/sys/unix.rs
@@ -2892,6 +2892,7 @@ fn run_control(
     #[cfg(feature = "balloon")]
     let mut balloon_wss_id: u64 = 0;
     let mut registered_evt_tubes: HashMap<RegisteredEvent, HashSet<AddressedTube>> = HashMap::new();
+    let mut region_state = VmMemoryRegionState::new();

     'wait: loop {
         let events = {
@@ -3254,6 +3255,7 @@ fn run_control(
                                 } else {
                                     None
                                 },
+                                &mut region_state,
                             );
                             if let Err(e) = tube.send(&response) {
                                 error!("failed to send VmMemoryControlResponse: {}", e);
diff --git a/src/sys/windows.rs b/src/sys/windows.rs
index f9c7da206d..0ce68f50f0 100644
--- a/src/sys/windows.rs
+++ b/src/sys/windows.rs
@@ -184,6 +184,7 @@ use tube_transporter::TubeToken;
 use tube_transporter::TubeTransporterReader;
 use vm_control::BalloonControlCommand;
 use vm_control::DeviceControlCommand;
+use vm_control::VmMemoryRegionState;
 use vm_control::VmMemoryRequest;
 use vm_control::VmRunMode;
 use vm_memory::GuestAddress;
@@ -781,6 +782,7 @@ fn handle_readable_event(
     vcpu_boxes: &Mutex<Vec<Box<dyn VcpuArch>>>,
     pvclock_host_tube: &Option<Tube>,
     run_mode_arc: &VcpuRunMode,
+    region_state: &mut VmMemoryRegionState,
 ) -> Result<(bool, Option<ExitState>)> {
     match event.token {
         Token::VmEvent => match vm_evt_rdtube.recv::<VmEventType>() {
@@ -829,6 +831,7 @@ fn handle_readable_event(
                     &mut sys_allocator_mutex.lock(),
                     gralloc,
                     None,
+                    region_state,
                 );
                 if let Err(e) = tube.send(&response) {
                     error!("failed to send VmMemoryControlResponse: {}", e);
@@ -1043,6 +1046,7 @@ fn run_control(
     }

     let mut exit_state = ExitState::Stop;
+    let mut region_state = VmMemoryRegionState::new();

     'poll: loop {
         let events = {
@@ -1076,6 +1080,7 @@ fn run_control(
                 vcpu_boxes.as_ref(),
                 &pvclock_host_tube,
                 run_mode_arc.as_ref(),
+                &mut region_state,
             )?;
             if let Some(state) = state {
                 exit_state = state;
diff --git a/vm_control/Cargo.toml b/vm_control/Cargo.toml
index 41e68027ee..3fa1d95942 100644
--- a/vm_control/Cargo.toml
+++ b/vm_control/Cargo.toml
@@ -19,6 +19,7 @@ gdbstub = { version = "0.6.3", optional = true }
 gdbstub_arch = { version = "0.2.4", optional = true }
 hypervisor = { path = "../hypervisor" }
 libc = "*"
+once_cell = "1.7.2"
 remain = "*"
 resources = { path = "../resources" }
 rutabaga_gfx = { path = "../rutabaga_gfx"}
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index 78d92eee0a..651a7aef43 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -25,7 +25,9 @@ pub mod client;
 pub mod display;
 pub mod sys;

+use std::collections::BTreeMap;
 use std::collections::BTreeSet;
+use std::collections::HashMap;
 use std::convert::TryInto;
 use std::fmt;
 use std::fmt::Display;
@@ -50,6 +52,7 @@ use base::info;
 use base::warn;
 use base::with_as_descriptor;
 use base::AsRawDescriptor;
+use base::Descriptor;
 use base::Error as SysError;
 use base::Event;
 use base::ExternalMapping;
@@ -481,6 +484,9 @@ pub struct IoEventUpdateRequest {
 #[derive(Serialize, Deserialize)]
 pub enum VmMemoryRequest {
+    /// Prepare a shared memory region to make later operations more efficient. This
+    /// may be a no-op depending on underlying platform support.
+    PrepareSharedMemoryRegion { alloc: Alloc },
     RegisterMemory {
         /// Source of the memory to register (mapped file descriptor, shared memory region, etc.)
         source: VmMemorySource,
@@ -500,7 +506,7 @@ pub enum VmMemoryRequest {
         size: u64,
     },
     /// Unregister the given memory slot that was previously registered with `RegisterMemory`.
-    UnregisterMemory(MemSlot),
+    UnregisterMemory(VmMemoryRegionId),
     /// Register an ioeventfd by looking up using Alloc info.
     IoEventWithAlloc {
         evt: Event,
@@ -529,6 +535,69 @@ impl<'a> VmMemoryRequestIommuClient<'a> {
     }
 }

+pub struct VmMemoryRegionState {
+    // alloc -> (pfn, slot)
+    slot_map: HashMap<Alloc, (u64, MemSlot)>,
+    // id -> (slot, Option<(offset, size)>)
+    mapped_regions: BTreeMap<VmMemoryRegionId, (MemSlot, Option<(usize, usize)>)>,
+}
+
+impl VmMemoryRegionState {
+    pub fn new() -> VmMemoryRegionState {
+        Self {
+            slot_map: HashMap::new(),
+            mapped_regions: BTreeMap::new(),
+        }
+    }
+}
+
+fn handle_prepared_region(
+    vm: &mut impl Vm,
+    region_state: &mut VmMemoryRegionState,
+    source: &VmMemorySource,
+    dest: &VmMemoryDestination,
+    prot: &Protection,
+) -> Option<VmMemoryResponse> {
+    let VmMemoryDestination::ExistingAllocation { allocation, offset } = dest else {
+        return None;
+    };
+
+    let (pfn, slot) = region_state.slot_map.get(allocation)?;
+
+    let (descriptor, file_offset, size) = match source {
+        VmMemorySource::Descriptor {
+            descriptor,
+            offset,
+            size,
+        } => (
+            Descriptor(descriptor.as_raw_descriptor()),
+            *offset,
+            *size as usize,
+        ),
+        VmMemorySource::SharedMemory(shm) => {
+            let size = shm.size() as usize;
+            (Descriptor(shm.as_raw_descriptor()), 0, size)
+        }
+        _ => return Some(VmMemoryResponse::Err(SysError::new(EINVAL))),
+    };
+    if let Err(err) = vm.add_fd_mapping(
+        *slot,
+        *offset as usize,
+        size,
+        &descriptor,
+        file_offset,
+        *prot,
+    ) {
+        return Some(VmMemoryResponse::Err(err));
+    }
+    let pfn = pfn + (offset >> 12);
+    region_state.mapped_regions.insert(
+        VmMemoryRegionId(pfn),
+        (*slot, Some((*offset as usize, size))),
+    );
+    Some(VmMemoryResponse::RegisterMemory(VmMemoryRegionId(pfn)))
+}
+
 impl VmMemoryRequest {
     /// Executes this request on the given Vm.
     ///
@@ -545,10 +614,34 @@ impl VmMemoryRequest {
         sys_allocator: &mut SystemAllocator,
         gralloc: &mut RutabagaGralloc,
         iommu_client: Option<&mut VmMemoryRequestIommuClient>,
+        region_state: &mut VmMemoryRegionState,
     ) -> VmMemoryResponse {
         use self::VmMemoryRequest::*;
         match self {
+            PrepareSharedMemoryRegion { alloc } => {
+                // Currently the iommu_client is only used by virtio-gpu, and virtio-gpu
+                // is incompatible with PrepareSharedMemoryRegion because we can't use
+                // add_fd_mapping with VmMemorySource::Vulkan.
+                assert!(iommu_client.is_none());
+
+                if !sys::should_prepare_memory_region() {
+                    return VmMemoryResponse::Ok;
+                }
+
+                match sys::prepare_shared_memory_region(vm, sys_allocator, alloc) {
+                    Ok(info) => {
+                        region_state.slot_map.insert(alloc, info);
+                        VmMemoryResponse::Ok
+                    }
+                    Err(e) => VmMemoryResponse::Err(e),
+                }
+            }
             RegisterMemory { source, dest, prot } => {
+                if let Some(resp) = handle_prepared_region(vm, region_state, &source, &dest, &prot)
+                {
+                    return resp;
+                }
+
                 // Correct on Windows because callers of this IPC guarantee descriptor is a mapping
                 // handle.
                 let (mapped_region, size, descriptor) = match source.map(gralloc, prot) {
@@ -594,33 +687,43 @@ impl VmMemoryRequest {
                 }

                 let pfn = guest_addr.0 >> 12;
-                VmMemoryResponse::RegisterMemory { pfn, slot }
+                region_state
+                    .mapped_regions
+                    .insert(VmMemoryRegionId(pfn), (slot, None));
+                VmMemoryResponse::RegisterMemory(VmMemoryRegionId(pfn))
             }
-            UnregisterMemory(slot) => match vm.remove_memory_region(slot) {
-                Ok(_) => {
-                    if let Some(iommu_client) = iommu_client {
-                        if iommu_client.gpu_memory.remove(&slot) {
-                            let request = VirtioIOMMURequest::VfioCommand(
-                                VirtioIOMMUVfioCommand::VfioDmabufUnmap(slot),
-                            );
+            UnregisterMemory(id) => match region_state.mapped_regions.remove(&id) {
+                Some((slot, None)) => match vm.remove_memory_region(slot) {
+                    Ok(_) => {
+                        if let Some(iommu_client) = iommu_client {
+                            if iommu_client.gpu_memory.remove(&slot) {
+                                let request = VirtioIOMMURequest::VfioCommand(
+                                    VirtioIOMMUVfioCommand::VfioDmabufUnmap(slot),
+                                );

-                            match virtio_iommu_request(iommu_client.tube, &request) {
-                                Ok(VirtioIOMMUResponse::VfioResponse(
-                                    VirtioIOMMUVfioResult::Ok,
-                                )) => VmMemoryResponse::Ok,
-                                resp => {
-                                    error!("Unexpected message response: {:?}", resp);
-                                    VmMemoryResponse::Err(SysError::new(EINVAL))
+                                match virtio_iommu_request(iommu_client.tube, &request) {
+                                    Ok(VirtioIOMMUResponse::VfioResponse(
+                                        VirtioIOMMUVfioResult::Ok,
+                                    )) => VmMemoryResponse::Ok,
+                                    resp => {
+                                        error!("Unexpected message response: {:?}", resp);
+                                        VmMemoryResponse::Err(SysError::new(EINVAL))
+                                    }
                                 }
+                            } else {
+                                VmMemoryResponse::Ok
                             }
                         } else {
                             VmMemoryResponse::Ok
                         }
-                    } else {
-                        VmMemoryResponse::Ok
                     }
-                }
-                Err(e) => VmMemoryResponse::Err(e),
+                    Err(e) => VmMemoryResponse::Err(e),
+                },
+                Some((slot, Some((offset, size)))) => match vm.remove_mapping(slot, offset, size) {
+                    Ok(()) => VmMemoryResponse::Ok,
+                    Err(e) => VmMemoryResponse::Err(e),
+                },
+                None => VmMemoryResponse::Err(SysError::new(EINVAL)),
             },
             DynamicallyFreeMemoryRange {
                 guest_address,
@@ -693,14 +796,15 @@ impl VmMemoryRequest {
     }
 }

+#[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Ord, Clone, Copy)]
+/// Identifier for registered memory regions. Globally unique.
+// The current implementation uses pfn as the unique identifier.
+pub struct VmMemoryRegionId(u64);
+
 #[derive(Serialize, Deserialize, Debug)]
 pub enum VmMemoryResponse {
-    /// The request to register memory into guest address space was successfully done at page frame
-    /// number `pfn` and memory slot number `slot`.
-    RegisterMemory {
-        pfn: u64,
-        slot: MemSlot,
-    },
+    /// The request to register memory into guest address space was successful.
+    RegisterMemory(VmMemoryRegionId),
     Ok,
     Err(SysError),
 }
diff --git a/vm_control/src/sys.rs b/vm_control/src/sys.rs
index f43f9e8ff9..380ce8c38f 100644
--- a/vm_control/src/sys.rs
+++ b/vm_control/src/sys.rs
@@ -19,4 +19,4 @@ cfg_if::cfg_if! {
     }
 }

-pub use platform::handle_request;
+pub use platform::{handle_request, prepare_shared_memory_region, should_prepare_memory_region};
diff --git a/vm_control/src/sys/unix.rs b/vm_control/src/sys/unix.rs
index 1aa9e191e3..ef8e3d3cc9 100644
--- a/vm_control/src/sys/unix.rs
+++ b/vm_control/src/sys/unix.rs
@@ -21,6 +21,7 @@ use hypervisor::MemSlot;
 use hypervisor::Vm;
 use libc::EINVAL;
 use libc::ERANGE;
+use once_cell::sync::Lazy;
 use resources::Alloc;
 use resources::SystemAllocator;
 use serde::Deserialize;
@@ -134,47 +135,65 @@ pub enum FsMappingRequest {
     },
 }

+pub fn prepare_shared_memory_region(
+    vm: &mut dyn Vm,
+    allocator: &mut SystemAllocator,
+    alloc: Alloc,
+) -> Result<(u64, MemSlot), SysError> {
+    if !matches!(alloc, Alloc::PciBar { .. }) {
+        return Err(SysError::new(EINVAL));
+    }
+    match allocator.mmio_allocator_any().get(&alloc) {
+        Some((range, _)) => {
+            let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
+                Some(v) => v,
+                None => return Err(SysError::new(ERANGE)),
+            };
+            let arena = match MemoryMappingArena::new(size) {
+                Ok(a) => a,
+                Err(MmapError::SystemCallFailed(e)) => return Err(e),
+                _ => return Err(SysError::new(EINVAL)),
+            };
+
+            match vm.add_memory_region(GuestAddress(range.start), Box::new(arena), false, false) {
+                Ok(slot) => Ok((range.start >> 12, slot)),
+                Err(e) => Err(e),
+            }
+        }
+        None => Err(SysError::new(EINVAL)),
+    }
+}
+
+static SHOULD_PREPARE_MEMORY_REGION: Lazy<bool> = Lazy::new(|| {
+    if cfg!(target_arch = "x86_64") {
+        // The legacy x86 MMU allocates an rmap and a page tracking array
+        // that take 2.5MiB per 1GiB of user memory region address space,
+        // so avoid mapping the whole shared memory region if we're not
+        // using the tdp mmu.
+        match std::fs::read("/sys/module/kvm/parameters/tdp_mmu") {
+            Ok(bytes) if bytes.len() > 0 => bytes[0] == b'Y',
+            _ => false,
+        }
+    } else if cfg!(target_pointer_width = "64") {
+        true
+    } else {
+        // Not enough address space on 32-bit systems
+        false
+    }
+});
+
+pub fn should_prepare_memory_region() -> bool {
+    *SHOULD_PREPARE_MEMORY_REGION
+}
+
 impl FsMappingRequest {
     pub fn execute(&self, vm: &mut dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
         use self::FsMappingRequest::*;
         match *self {
-            AllocateSharedMemoryRegion(Alloc::PciBar {
-                bus,
-                dev,
-                func,
-                bar,
-            }) => {
-                match allocator.mmio_allocator_any().get(&Alloc::PciBar {
-                    bus,
-                    dev,
-                    func,
-                    bar,
-                }) {
-                    Some((range, _)) => {
-                        let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
-                            Some(v) => v,
-                            None => return VmResponse::Err(SysError::new(ERANGE)),
-                        };
-                        let arena = match MemoryMappingArena::new(size) {
-                            Ok(a) => a,
-                            Err(MmapError::SystemCallFailed(e)) => return VmResponse::Err(e),
-                            _ => return VmResponse::Err(SysError::new(EINVAL)),
-                        };
-
-                        match vm.add_memory_region(
-                            GuestAddress(range.start),
-                            Box::new(arena),
-                            false,
-                            false,
-                        ) {
-                            Ok(slot) => VmResponse::RegisterMemory {
-                                pfn: range.start >> 12,
-                                slot,
-                            },
-                            Err(e) => VmResponse::Err(e),
-                        }
-                    }
-                    None => VmResponse::Err(SysError::new(EINVAL)),
+            AllocateSharedMemoryRegion(alloc) => {
+                match prepare_shared_memory_region(vm, allocator, alloc) {
+                    Ok((pfn, slot)) => VmResponse::RegisterMemory { pfn, slot },
+                    Err(e) => VmResponse::Err(e),
                 }
             }
             CreateMemoryMapping {
@@ -198,7 +217,6 @@ impl FsMappingRequest {
                 Err(e) => VmResponse::Err(e),
             }
         }
-            _ => VmResponse::Err(SysError::new(EINVAL)),
         }
     }
 }
diff --git a/vm_control/src/sys/windows.rs b/vm_control/src/sys/windows.rs
index 408dddf436..e1ff804eb7 100644
--- a/vm_control/src/sys/windows.rs
+++ b/vm_control/src/sys/windows.rs
@@ -10,8 +10,13 @@ use std::mem::size_of;
 use std::path::Path;

 use base::named_pipes::OverlappedWrapper;
+use base::Error;
 use base::Event;
 use base::PipeConnection;
+use hypervisor::MemSlot;
+use hypervisor::Vm;
+use resources::Alloc;
+use resources::SystemAllocator;

 use crate::client::HandleRequestResult;
 use crate::VmRequest;
@@ -61,3 +66,15 @@ pub fn recv_service_message(
         exit_event,
     )
 }
+
+pub fn should_prepare_memory_region() -> bool {
+    false
+}
+
+pub fn prepare_shared_memory_region(
+    _vm: &mut dyn Vm,
+    _allocator: &mut SystemAllocator,
+    _alloc: Alloc,
+) -> std::result::Result<(u64, MemSlot), Error> {
+    unimplemented!()
+}
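
Note on the mechanism (illustrative, not part of the patch): at the mmap
level, the prepared-region path amounts to reserving one large placeholder
mapping up front and then mapping or unmapping individual fds inside it
with MAP_FIXED, so the KVM memslot layout is set up once and never touched
again. The sketch below shows that pattern with raw libc calls under stated
assumptions: the helper names are hypothetical, PROT_NONE/MAP_NORESERVE is
just one way to reserve address space, and crosvm itself goes through
MemoryMappingArena, Vm::add_fd_mapping, and Vm::remove_mapping rather than
calling mmap directly.

    use std::os::unix::io::RawFd;

    /// Reserve `size` bytes of address space once; this corresponds to the
    /// single add_memory_region done by prepare_shared_memory_region.
    unsafe fn reserve_region(size: usize) -> *mut libc::c_void {
        libc::mmap(
            std::ptr::null_mut(),
            size,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_NORESERVE,
            -1,
            0,
        )
    }

    /// Map `fd` at `offset` inside the reserved region. MAP_FIXED replaces
    /// the placeholder pages in place, with no KVM_SET_USER_MEMORY_REGION
    /// call; this is the effect add_fd_mapping relies on.
    unsafe fn map_fd(
        base: *mut libc::c_void,
        offset: usize,
        len: usize,
        fd: RawFd,
    ) -> *mut libc::c_void {
        libc::mmap(
            (base as usize + offset) as *mut libc::c_void,
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | libc::MAP_FIXED,
            fd,
            0,
        )
    }

    /// Undo a mapping by restoring an inaccessible placeholder over the
    /// range (the remove_mapping counterpart), again leaving the memslot
    /// untouched instead of punching a hole with munmap.
    unsafe fn clear_mapping(base: *mut libc::c_void, offset: usize, len: usize) {
        libc::mmap(
            (base as usize + offset) as *mut libc::c_void,
            len,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_FIXED,
            -1,
            0,
        );
    }

    fn main() {
        // Demo: reserve 1 GiB once, then map a 4 KiB memfd into it.
        unsafe {
            let base = reserve_region(1 << 30);
            assert_ne!(base, libc::MAP_FAILED);
            let fd: RawFd = libc::memfd_create(b"demo\0".as_ptr().cast(), 0);
            assert!(fd >= 0);
            assert_eq!(libc::ftruncate(fd, 4096), 0);
            let p = map_fd(base, 0, 4096, fd);
            assert_ne!(p, libc::MAP_FAILED);
            clear_mapping(base, 0, 4096);
        }
    }

The same reserve-once, remap-many idea is why UnregisterMemory for a
prepared region can be served by remove_mapping alone, while only the
non-prepared fallback still calls remove_memory_region.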