hypervisor: consolidate to a single balloon Vm trait method

Remove the `handle_inflate` and `handle_deflate` methods from the `Vm` trait
and replace them with a single `handle_balloon_event` method. This will allow
more balloon events to be handled through the `BalloonEvent` enum.

The change is a pure refactoring and is behaviorally a no-op.

Bug: b:267051826
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Change-Id: Ia5436eb3b72d6081ac3c05ae461eeb426e5f783c
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4601859
Reviewed-by: Vikram Auradkar <auradkar@google.com>
Reviewed-by: Noah Gold <nkgold@google.com>
This commit is contained in:
Vaibhav Nagarnaik 2023-06-08 19:06:04 +00:00 committed by crosvm LUCI
parent 16625da26a
commit 3fcc52cc94
7 changed files with 171 additions and 148 deletions

View file

@ -58,6 +58,7 @@ use vm_memory::GuestMemory;
use vm_memory::MemoryRegionInformation;
use vm_memory::MemoryRegionPurpose;
use crate::BalloonEvent;
use crate::ClockState;
use crate::Config;
use crate::Datamatch;
@ -868,6 +869,19 @@ impl GeniezoneVm {
errno_result()
}
}
/// Releases the given guest-physical range back to the host OS
/// (virtio-balloon inflate path).
///
/// Punches a hole in the guest memory mapping via `remove_range`. A raw
/// system-call failure surfaces its errno unchanged; any other removal
/// error collapses to a generic `EIO`.
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
    match self.guest_mem.remove_range(guest_address, size) {
        Ok(_) => Ok(()),
        // Preserve the underlying errno when the failure came from a syscall.
        Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
        // All other removal failures are reported as a generic I/O error.
        Err(_) => Err(Error::new(EIO)),
    }
}
/// Returns a previously ballooned range to the guest (deflate path).
///
/// Deliberately a no-op: the pages are faulted back in on demand the next
/// time the guest touches them.
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
    // No-op, when the guest attempts to access the pages again, Linux/GZVM will provide them.
    Ok(())
}
}
impl Vm for GeniezoneVm {
@ -1049,18 +1063,12 @@ impl Vm for GeniezoneVm {
}
}
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
match self.guest_mem.remove_range(guest_address, size) {
Ok(_) => Ok(()),
Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
Err(_) => Err(Error::new(EIO)),
/// Routes a virtio-balloon event to the matching GZVM handler.
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
    match event {
        BalloonEvent::Inflate(region) => self.handle_inflate(region.guest_address, region.size),
        BalloonEvent::Deflate(region) => self.handle_deflate(region.guest_address, region.size),
    }
}
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
// No-op, when the guest attempts to access the pages again, Linux/GZVM will provide them.
Ok(())
}
}
impl AsRawDescriptor for GeniezoneVm {

View file

@ -609,11 +609,7 @@ impl Vm for GunyahVm {
}
}
fn handle_inflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
unimplemented!()
}
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
/// Balloon events are not yet supported on the Gunyah hypervisor; any
/// attempt to deliver one panics via `unimplemented!()`.
fn handle_balloon_event(&mut self, _event: BalloonEvent) -> Result<()> {
    unimplemented!()
}
}

View file

@ -405,14 +405,7 @@ impl Vm for HaxmVm {
}
}
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
// TODO(b/233773610): implement ballooning support in haxm
warn!("Memory ballooning attempted but not supported on haxm hypervisor");
// no-op
Ok(())
}
fn handle_inflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
fn handle_balloon_event(&mut self, _event: crate::BalloonEvent) -> Result<()> {
// TODO(b/233773610): implement ballooning support in haxm
warn!("Memory ballooning attempted but not supported on haxm hypervisor");
// no-op

View file

@ -68,6 +68,7 @@ use vm_memory::MemoryRegionInformation;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub use x86_64::*;
use crate::BalloonEvent;
use crate::ClockState;
use crate::Config;
use crate::Datamatch;
@ -509,6 +510,19 @@ impl KvmVm {
errno_result()
}
}
/// Releases the given guest-physical range back to the host OS
/// (virtio-balloon inflate path).
///
/// Punches a hole in the guest memory mapping via `remove_range`. A raw
/// system-call failure surfaces its errno unchanged; any other removal
/// error collapses to a generic `EIO`.
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
    match self.guest_mem.remove_range(guest_address, size) {
        Ok(_) => Ok(()),
        // Preserve the underlying errno when the failure came from a syscall.
        Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
        // All other removal failures are reported as a generic I/O error.
        Err(_) => Err(Error::new(EIO)),
    }
}
/// Returns a previously ballooned range to the guest (deflate path).
///
/// Deliberately a no-op: the pages are faulted back in on demand the next
/// time the guest touches them.
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
    // No-op, when the guest attempts to access the pages again, Linux/KVM will provide them.
    Ok(())
}
}
impl Vm for KvmVm {
@ -745,18 +759,12 @@ impl Vm for KvmVm {
}
}
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
match self.guest_mem.remove_range(guest_address, size) {
Ok(_) => Ok(()),
Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
Err(_) => Err(Error::new(EIO)),
/// Routes a virtio-balloon event to the matching KVM handler.
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
    match event {
        BalloonEvent::Inflate(region) => self.handle_inflate(region.guest_address, region.size),
        BalloonEvent::Deflate(region) => self.handle_deflate(region.guest_address, region.size),
    }
}
fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
// No-op, when the guest attempts to access the pages again, Linux/KVM will provide them.
Ok(())
}
}
impl AsRawDescriptor for KvmVm {

View file

@ -50,6 +50,24 @@ pub use crate::x86_64::*;
/// An index in the list of guest-mapped memory regions.
pub type MemSlot = u32;
/// Range of GPA space. Starting from `guest_address` up to `size`.
pub struct MemRegion {
    /// Guest-physical address at which the region starts.
    pub guest_address: GuestAddress,
    /// Length of the region in bytes.
    pub size: u64,
}
/// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
/// requests they be freed. Use without the guest's knowledge is sure to break something.
pub enum BalloonEvent {
    /// Balloon event when the region is acquired from the guest. The guest cannot access this
    /// region any more, and its backing memory can be reclaimed by the host OS. As per the
    /// virtio-balloon spec, the given address and size are intended to be page-aligned.
    Inflate(MemRegion),
    /// Balloon event when the region is returned to the guest. The VMM should reallocate memory
    /// and register it with the hypervisor so the guest can access it again.
    Deflate(MemRegion),
}
/// A trait for checking hypervisor capabilities.
pub trait Hypervisor: Send {
/// Makes a shallow clone of this `Hypervisor`.
@ -189,24 +207,8 @@ pub trait Vm: Send {
/// Remove `size`-byte mapping starting at `offset`.
fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;
/// Frees the given segment of guest memory to be reclaimed by the host OS.
/// This is intended for use with virtio-balloon, where a guest driver determines
/// unused ranges and requests they be freed. Use without the guest's knowledge is sure
/// to break something. As per virtio-balloon spec, the given address and size
/// are intended to be page-aligned.
///
/// # Arguments
/// * `guest_address` - Address in the guest's "physical" memory to begin the unmapping
/// * `size` - The size of the region to unmap, in bytes
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()>;
/// Reallocates memory and maps it to provide to the guest. This is intended to be used
/// exclusively in tandem with `handle_inflate`, and will return an `Err` Result otherwise.
///
/// # Arguments
/// * `guest_address` - Address in the guest's "physical" memory to begin the mapping
/// * `size` - The size of the region to map, in bytes
fn handle_deflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()>;
/// Events from virtio-balloon that affect the state for guest memory and host memory.
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>;
}
/// Operation for Io and Mmio

View file

@ -47,6 +47,7 @@ use super::types::*;
use super::*;
use crate::host_phys_addr_bits;
use crate::whpx::whpx_sys::*;
use crate::BalloonEvent;
use crate::ClockState;
use crate::Datamatch;
use crate::DeliveryMode;
@ -342,6 +343,104 @@ impl WhpxVm {
)
})
}
/// In order to fully unmap a memory range such that the host can reclaim the memory,
/// we unmap it from the hypervisor partition, and then mark crosvm's process as uninterested
/// in the memory.
///
/// This will make crosvm unable to access the memory, and allow Windows to reclaim it for other
/// uses when memory is in demand.
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
    info!(
        "Balloon: Requested WHPX unmap of addr: {:?}, size: {:?}",
        guest_address, size
    );
    // Step 1: remove the GPA range from the hypervisor partition so the
    // guest can no longer touch it.
    // Safe because WHPX does proper error checking, even if an out-of-bounds address is
    // provided.
    unsafe {
        check_whpx!(WHvUnmapGpaRange(
            self.vm_partition.partition,
            guest_address.offset(),
            size,
        ))?;
    }
    let host_address = self
        .guest_mem
        .get_host_address(guest_address)
        .map_err(|_| Error::new(1))? as *mut c_void;
    // Step 2: tell Windows that crosvm no longer needs the backing pages so
    // the OS may repurpose them under memory pressure.
    // Safe because we have just successfully unmapped this range from the
    // guest partition, so we know it's unused.
    let result =
        unsafe { OfferVirtualMemory(host_address, size as usize, VmOfferPriorityBelowNormal) };
    if result != ERROR_SUCCESS {
        let err = Error::new(result);
        error!("Freeing memory failed with error: {}", err);
        return Err(err);
    }
    Ok(())
}
/// Remap memory that has previously been unmapped with #handle_inflate. Note
/// that attempts to remap pages that were not previously unmapped, or addresses that are not
/// page-aligned, will result in failure.
///
/// To do this, reclaim the memory from Windows first, then remap it into the hypervisor
/// partition. Remapped memory has no guarantee of content, and the guest should not expect
/// it to.
/// Reclaims a previously offered range from Windows and remaps it into the
/// hypervisor partition (virtio-balloon deflate path). Remapped memory has no
/// guarantee of content: it is zeroed here before being handed back.
fn handle_deflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
    // Fixed: this is the remap/reclaim path, so log it as such (the message
    // previously said "unmap", copy-pasted from handle_inflate).
    info!(
        "Balloon: Requested WHPX remap of addr: {:?}, size: {:?}",
        guest_address, size
    );
    let host_address = self
        .guest_mem
        .get_host_address(guest_address)
        .map_err(|_| Error::new(1))? as *const c_void;
    // Note that we aren't doing any validation here that this range was previously unmapped.
    // However, we can avoid that expensive validation by relying on Windows error checking for
    // ReclaimVirtualMemory. The call will fail if:
    // - If the range is not currently "offered"
    // - The range is outside of current guest mem (GuestMemory will fail to convert the
    //   address)
    // In short, security is guaranteed by ensuring the guest can never reclaim ranges it
    // hadn't previously forfeited (and even then, the contents will be zeroed).
    //
    // Safe because the memory ranges in question are managed by Windows, not Rust.
    // Also, ReclaimVirtualMemory has built-in error checking for bad parameters.
    let result = unsafe { ReclaimVirtualMemory(host_address, size as usize) };
    if result == ERROR_BUSY || result == ERROR_SUCCESS {
        // In either of these cases, the contents of the reclaimed memory
        // are preserved or undefined. Regardless, zero the memory
        // to ensure no unintentional memory contents are shared.
        //
        // Safe because we just reclaimed the region in question and haven't yet remapped
        // it to the guest partition, so we know it's unused.
        unsafe { RtlZeroMemory(host_address as RawDescriptor, size as usize) };
    } else {
        let err = Error::new(result);
        error!("Reclaiming memory failed with error: {}", err);
        return Err(err);
    }
    // Safe because no-overlap is guaranteed by the success of ReclaimVirtualMemory,
    // which would fail if it was called on areas which were not unmapped.
    unsafe {
        set_user_memory_region(
            &self.vm_partition,
            false, // read_only
            false, // track dirty pages
            guest_address.offset(),
            size,
            host_address as *mut u8,
        )
    }
}
}
// Wrapper around WHvMapGpaRange, which creates, modifies, or deletes a mapping
@ -645,101 +744,10 @@ impl Vm for WhpxVm {
}
}
/// In order to fully unmap a memory range such that the host can reclaim the memory,
/// we unmap it from the hypervisor partition, and then mark crosvm's process as uninterested
/// in the memory.
///
/// This will make crosvm unable to access the memory, and allow Windows to reclaim it for other
/// uses when memory is in demand.
fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
info!(
"Balloon: Requested WHPX unmap of addr: {:?}, size: {:?}",
guest_address, size
);
// Safe because WHPX does proper error checking, even if an out-of-bounds address is
// provided.
unsafe {
check_whpx!(WHvUnmapGpaRange(
self.vm_partition.partition,
guest_address.offset(),
size,
))?;
}
let host_address = self
.guest_mem
.get_host_address(guest_address)
.map_err(|_| Error::new(1))? as *mut c_void;
// Safe because we have just successfully unmapped this range from the
// guest partition, so we know it's unused.
let result =
unsafe { OfferVirtualMemory(host_address, size as usize, VmOfferPriorityBelowNormal) };
if result != ERROR_SUCCESS {
let err = Error::new(result);
error!("Freeing memory failed with error: {}", err);
return Err(err);
}
Ok(())
}
/// Remap memory that has previously been unmapped with #handle_inflate. Note
/// that attempts to remap pages that were not previously unmapped, or addresses that are not
/// page-aligned, will result in failure.
///
/// To do this, reclaim the memory from Windows first, then remap it into the hypervisor
/// partition. Remapped memory has no guarantee of content, and the guest should not expect
/// it to.
fn handle_deflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
info!(
"Balloon: Requested WHPX unmap of addr: {:?}, size: {:?}",
guest_address, size
);
let host_address = self
.guest_mem
.get_host_address(guest_address)
.map_err(|_| Error::new(1))? as *const c_void;
// Note that we aren't doing any validation here that this range was previously unmapped.
// However, we can avoid that expensive validation by relying on Windows error checking for
// ReclaimVirtualMemory. The call will fail if:
// - If the range is not currently "offered"
// - The range is outside of current guest mem (GuestMemory will fail to convert the
// address)
// In short, security is guaranteed by ensuring the guest can never reclaim ranges it
// hadn't previously forfeited (and even then, the contents will be zeroed).
//
// Safe because the memory ranges in question are managed by Windows, not Rust.
// Also, ReclaimVirtualMemory has built-in error checking for bad parameters.
let result = unsafe { ReclaimVirtualMemory(host_address, size as usize) };
if result == ERROR_BUSY || result == ERROR_SUCCESS {
// In either of these cases, the contents of the reclaimed memory
// are preserved or undefined. Regardless, zero the memory
// to ensure no unintentional memory contents are shared.
//
// Safe because we just reclaimed the region in question and haven't yet remapped
// it to the guest partition, so we know it's unused.
unsafe { RtlZeroMemory(host_address as RawDescriptor, size as usize) };
} else {
let err = Error::new(result);
error!("Reclaiming memory failed with error: {}", err);
return Err(err);
}
// Safe because no-overlap is guaranteed by the success of ReclaimVirtualMemory,
// Which would fail if it was called on areas which were not unmapped.
unsafe {
set_user_memory_region(
&self.vm_partition,
false, // read_only
false, // track dirty pages
guest_address.offset(),
size,
host_address as *mut u8,
)
/// Routes a virtio-balloon event to the matching WHPX handler.
fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
    match event {
        BalloonEvent::Inflate(region) => self.handle_inflate(region.guest_address, region.size),
        BalloonEvent::Deflate(region) => self.handle_deflate(region.guest_address, region.size),
    }
}

View file

@ -20,6 +20,8 @@ pub mod gpu;
use base::MemoryMappingBuilderUnix;
#[cfg(windows)]
use base::MemoryMappingBuilderWindows;
use hypervisor::BalloonEvent;
use hypervisor::MemRegion;
pub mod client;
pub mod display;
@ -752,14 +754,20 @@ impl VmMemoryRequest {
DynamicallyFreeMemoryRange {
guest_address,
size,
} => match vm.handle_inflate(guest_address, size) {
} => match vm.handle_balloon_event(BalloonEvent::Inflate(MemRegion {
guest_address,
size,
})) {
Ok(_) => VmMemoryResponse::Ok,
Err(e) => VmMemoryResponse::Err(e),
},
DynamicallyReclaimMemoryRange {
guest_address,
size,
} => match vm.handle_deflate(guest_address, size) {
} => match vm.handle_balloon_event(BalloonEvent::Deflate(MemRegion {
guest_address,
size,
})) {
Ok(_) => VmMemoryResponse::Ok,
Err(e) => VmMemoryResponse::Err(e),
},