vmm_vhost: SET_DEVICE_STATE support

This replaces the custom SNAPSHOT/RESTORE vhost-user extensions with the
new SET_DEVICE_STATE_FD and CHECK_DEVICE_STATE vhost-user methods.

For now, we keep the custom message types around as a fallback to allow
a smoother transition for non-crosvm users (there is only one such user
I know of, so it shouldn't take long).
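
Roughly, the frontend side of the save direction now looks like the
following (an illustrative sketch based on the VhostUserFrontend::virtio_snapshot()
changes below, error handling elided; `backend_client` is the device's
vhost-user BackendClient):

    // Hand the backend the write end of a pipe to stream its state into.
    let (mut r, w) = new_pipe_pair()?;
    let backend_r = backend_client.set_device_state_fd(
        VhostUserTransferDirection::Save,
        VhostUserMigrationPhase::Stopped,
        &w,
    )?;
    // Close our copy of the write end so EOF marks the end of the state bytes.
    std::mem::drop(w);
    let mut buf = Vec::new();
    match backend_r {
        // The backend may hand back its own FD for us to read from instead.
        Some(mut backend_r) => backend_r.read_to_end(&mut buf)?,
        None => r.read_to_end(&mut buf)?,
    };
    // Confirm the transfer actually succeeded on the backend side.
    backend_client.check_device_state()?;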

BUG=b:301269927

Change-Id: Ib3d1d232fdfc92e605c372e778e78d84395704fe
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5735713
Commit-Queue: Frederick Mayle <fmayle@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Frederick Mayle 2024-07-19 15:03:10 -07:00 committed by crosvm LUCI
parent bf1615f840
commit 7cbf4ac45a
12 changed files with 432 additions and 47 deletions

View file

@ -61,6 +61,7 @@ impl VhostUserDevice for BlockBackend {
VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::DEVICE_STATE
}
fn read_config(&self, offset: u64, data: &mut [u8]) {

View file

@ -121,6 +121,7 @@ impl VhostUserDevice for GpuBackend {
| VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS
| VhostUserProtocolFeatures::DEVICE_STATE
}
fn read_config(&self, offset: u64, dst: &mut [u8]) {

View file

@ -64,6 +64,7 @@ use base::Event;
use base::Protection;
use base::SafeDescriptor;
use base::SharedMemory;
use base::WorkerThread;
use cros_async::TaskHandle;
use hypervisor::MemCacheType;
use serde::Deserialize;
@ -80,11 +81,13 @@ use vmm_vhost::message::VhostUserExternalMapMsg;
use vmm_vhost::message::VhostUserGpuMapMsg;
use vmm_vhost::message::VhostUserInflight;
use vmm_vhost::message::VhostUserMemoryRegion;
use vmm_vhost::message::VhostUserMigrationPhase;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::message::VhostUserShmemMapMsg;
use vmm_vhost::message::VhostUserShmemMapMsgFlags;
use vmm_vhost::message::VhostUserShmemUnmapMsg;
use vmm_vhost::message::VhostUserSingleMemoryRegion;
use vmm_vhost::message::VhostUserTransferDirection;
use vmm_vhost::message::VhostUserVringAddrFlags;
use vmm_vhost::message::VhostUserVringState;
use vmm_vhost::BackendReq;
@ -330,6 +333,13 @@ pub struct DeviceRequestHandler<T: VhostUserDevice> {
acked_protocol_features: VhostUserProtocolFeatures,
backend: T,
backend_req_connection: Arc<Mutex<VhostBackendReqConnectionState>>,
// Thread processing the active device state FD, if any.
device_state_thread: Option<DeviceStateThread>,
}
enum DeviceStateThread {
Save(WorkerThread<serde_json::Result<()>>),
Load(WorkerThread<serde_json::Result<DeviceRequestHandlerSnapshot>>),
}
#[derive(Serialize, Deserialize)]
@ -363,6 +373,7 @@ impl<T: VhostUserDevice> DeviceRequestHandler<T> {
backend_req_connection: Arc::new(Mutex::new(
VhostBackendReqConnectionState::NoConnection,
)),
device_state_thread: None,
}
}
}
@ -711,6 +722,89 @@ impl<T: VhostUserDevice> vmm_vhost::Backend for DeviceRequestHandler<T> {
Ok(())
}
fn set_device_state_fd(
&mut self,
transfer_direction: VhostUserTransferDirection,
migration_phase: VhostUserMigrationPhase,
mut fd: File,
) -> VhostResult<Option<File>> {
if migration_phase != VhostUserMigrationPhase::Stopped {
return Err(VhostError::InvalidOperation);
}
if !self.suspended {
return Err(VhostError::InvalidOperation);
}
if self.device_state_thread.is_some() {
error!("must call check_device_state before starting new state transfer");
return Err(VhostError::InvalidOperation);
}
// `set_device_state_fd` is designed to allow snapshot/restore to run concurrently with other
// methods but, for simplicity, we do those operations inline and only spawn a thread to
// handle the serialization and data transfer (the latter seems necessary to implement the
// API correctly without, e.g., deadlocking because a pipe is full).
match transfer_direction {
VhostUserTransferDirection::Save => {
// Snapshot the state.
let snapshot = DeviceRequestHandlerSnapshot {
acked_features: self.acked_features,
acked_protocol_features: self.acked_protocol_features.bits(),
backend: self.backend.snapshot().map_err(VhostError::SnapshotError)?,
};
// Spawn thread to write the serialized bytes.
self.device_state_thread = Some(DeviceStateThread::Save(WorkerThread::start(
"device_state_save",
move |_kill_event| serde_json::to_writer(&mut fd, &snapshot),
)));
Ok(None)
}
VhostUserTransferDirection::Load => {
// Spawn a thread to read the bytes and deserialize. Restore will happen in
// `check_device_state`.
self.device_state_thread = Some(DeviceStateThread::Load(WorkerThread::start(
"device_state_load",
move |_kill_event| serde_json::from_reader(&mut fd),
)));
Ok(None)
}
}
}
fn check_device_state(&mut self) -> VhostResult<()> {
let Some(thread) = self.device_state_thread.take() else {
error!("check_device_state: no active state transfer");
return Err(VhostError::InvalidOperation);
};
match thread {
DeviceStateThread::Save(worker) => {
worker.stop().map_err(|e| {
error!("device state save thread failed: {:#}", e);
VhostError::BackendInternalError
})?;
Ok(())
}
DeviceStateThread::Load(worker) => {
let snapshot = worker.stop().map_err(|e| {
error!("device state load thread failed: {:#}", e);
VhostError::BackendInternalError
})?;
self.acked_features = snapshot.acked_features;
self.acked_protocol_features =
VhostUserProtocolFeatures::from_bits(snapshot.acked_protocol_features)
.with_context(|| {
format!(
"unsupported bits in acked_protocol_features: {:#x}",
snapshot.acked_protocol_features
)
})
.map_err(VhostError::RestoreError)?;
self.backend
.restore(snapshot.backend)
.map_err(VhostError::RestoreError)?;
Ok(())
}
}
}
fn get_shared_memory_regions(&mut self) -> VhostResult<Vec<VhostSharedMemoryRegion>> {
Ok(if let Some(r) = self.backend.get_shared_memory_region() {
vec![VhostSharedMemoryRegion::new(r.id, r.length)]
@ -965,6 +1059,7 @@ mod tests {
acked_features: u64,
active_queues: Vec<Option<Queue>>,
allow_backend_req: bool,
supports_device_state: bool,
backend_conn: Option<Arc<VhostBackendReqConnection>>,
}
@ -979,6 +1074,7 @@ mod tests {
acked_features: 0,
active_queues,
allow_backend_req: false,
supports_device_state: false,
backend_conn: None,
}
}
@ -1010,6 +1106,9 @@ mod tests {
if self.allow_backend_req {
features |= VhostUserProtocolFeatures::BACKEND_REQ;
}
if self.supports_device_state {
features |= VhostUserProtocolFeatures::DEVICE_STATE;
}
features
}
@ -1056,16 +1155,30 @@ mod tests {
#[test]
fn test_vhost_user_lifecycle() {
test_vhost_user_lifecycle_parameterized(false);
test_vhost_user_lifecycle_parameterized(false, true);
}
#[test]
fn test_vhost_user_lifecycle_legacy_snapshot() {
test_vhost_user_lifecycle_parameterized(false, false);
}
#[test]
#[cfg(not(windows))] // Windows requires more complex connection setup.
fn test_vhost_user_lifecycle_with_backend_req() {
test_vhost_user_lifecycle_parameterized(true);
test_vhost_user_lifecycle_parameterized(true, true);
}
fn test_vhost_user_lifecycle_parameterized(allow_backend_req: bool) {
#[test]
#[cfg(not(windows))] // Windows requires more complex connection setup.
fn test_vhost_user_lifecycle_with_backend_req_legacy_snapshot() {
test_vhost_user_lifecycle_parameterized(true, false);
}
fn test_vhost_user_lifecycle_parameterized(
allow_backend_req: bool,
supports_device_state: bool,
) {
const QUEUES_NUM: usize = 2;
let (dev, vmm) = test_helpers::setup();
@ -1159,6 +1272,7 @@ mod tests {
// Device side
let mut handler = DeviceRequestHandler::new(FakeBackend::new());
handler.as_mut().allow_backend_req = allow_backend_req;
handler.as_mut().supports_device_state = supports_device_state;
// Notify listener is ready.
ready_tx.send(()).unwrap();
@ -1224,8 +1338,19 @@ mod tests {
handle_request(&mut req_handler, FrontendReq::GET_VRING_BASE).unwrap();
}
handle_request(&mut req_handler, FrontendReq::SNAPSHOT).unwrap();
handle_request(&mut req_handler, FrontendReq::RESTORE).unwrap();
if supports_device_state {
// VhostUserFrontend::virtio_snapshot()
handle_request(&mut req_handler, FrontendReq::SET_DEVICE_STATE_FD).unwrap();
handle_request(&mut req_handler, FrontendReq::CHECK_DEVICE_STATE).unwrap();
// VhostUserFrontend::virtio_restore()
handle_request(&mut req_handler, FrontendReq::SET_DEVICE_STATE_FD).unwrap();
handle_request(&mut req_handler, FrontendReq::CHECK_DEVICE_STATE).unwrap();
} else {
// VhostUserFrontend::virtio_snapshot()
handle_request(&mut req_handler, FrontendReq::SNAPSHOT).unwrap();
// VhostUserFrontend::virtio_restore()
handle_request(&mut req_handler, FrontendReq::RESTORE).unwrap();
}
// VhostUserFrontend::virtio_wake()
handle_request(&mut req_handler, FrontendReq::SET_MEM_TABLE).unwrap();

View file

@ -163,7 +163,7 @@ where
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::DEVICE_STATE
}
fn read_config(&self, offset: u64, data: &mut [u8]) {

View file

@ -157,7 +157,9 @@ impl VhostUserDevice for SndBackend {
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::DEVICE_STATE
}
fn read_config(&self, offset: u64, data: &mut [u8]) {

View file

@ -27,8 +27,10 @@ use vmm_vhost::message::VhostSharedMemoryRegion;
use vmm_vhost::message::VhostUserConfigFlags;
use vmm_vhost::message::VhostUserInflight;
use vmm_vhost::message::VhostUserMemoryRegion;
use vmm_vhost::message::VhostUserMigrationPhase;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::message::VhostUserSingleMemoryRegion;
use vmm_vhost::message::VhostUserTransferDirection;
use vmm_vhost::message::VhostUserVringAddrFlags;
use vmm_vhost::message::VhostUserVringState;
use vmm_vhost::Error;
@ -420,6 +422,19 @@ impl vmm_vhost::Backend for VsockBackend {
Err(Error::InvalidOperation)
}
fn set_device_state_fd(
&mut self,
_transfer_direction: VhostUserTransferDirection,
_migration_phase: VhostUserMigrationPhase,
_fd: File,
) -> Result<Option<File>> {
Err(Error::InvalidOperation)
}
fn check_device_state(&mut self) -> Result<()> {
Err(Error::InvalidOperation)
}
fn get_shared_memory_regions(&mut self) -> Result<Vec<VhostSharedMemoryRegion>> {
Ok(vec![])
}

View file

@ -26,38 +26,6 @@ While all vrings are stopped, the device is suspended. In addition to not proces
**NEW: The frontend can assume those requirements are obeyed both (1) before the first queue is
started and (2) as soon as it receives a response for the message that stopped the last queue.**
## Front-end message types
### VHOST_USER_SNAPSHOT
id: 1002 (temporary)
equivalent ioctl: N/A
request payload: N/A
reply payload: i8, followed by (payload size - 1) bytes of opaque snapshot data
Backend should create a snapshot of all state needed to perform a restore.
The first byte of the response should be 1 to indicate success or 0 to indicate failure. The rest of
the response is the snapshot bytes, which are opaque from the perspective of the frontend.
### VHOST_USER_RESTORE
id: 1003 (temporary)
equivalent ioctl: N/A
request payload: (payload size) bytes of opaque snapshot data
reply payload: i8
Backend should restore itself to the state of the snapshot provided in the request payload. The request
will contain the exact same bytes returned from a previous VHOST_USER_SNAPSHOT request.
The one byte response should be 1 to indicate success or 0 to indicate failure.
## Snapshot-Restore
TODO: write an overview for the feature
@ -73,7 +41,8 @@ Snapshot sequence:
somewhere.
- Backend enters the "suspended device state" when the last queue is stopped.
1. For each vhost-user device
- Frontend sends VHOST_USER_SNAPSHOT request and saves the response payload somewhere.
- Frontend sends VHOST_USER_SET_DEVICE_STATE_FD and VHOST_USER_CHECK_DEVICE_STATE requests with
transfer direction "save" to save the device state somewhere.
1. For each vhost-user device
- Frontend sends VHOST_USER_SET_MEM_TABLE request.
- Frontend starts all the queues as if from scratch, using the saved vring base in the
@ -89,7 +58,8 @@ Restore sequence:
somewhere.
- Backend enters the "suspended device state" when the last queue is stopped.
1. For each vhost-user device
- Frontend sends VHOST_USER_RESTORE request.
- Frontend sends VHOST_USER_SET_DEVICE_STATE_FD and VHOST_USER_CHECK_DEVICE_STATE requests with
transfer direction "load" restore the device state.
1. For each vhost-user device
- Frontend sends VHOST_USER_SET_MEM_TABLE request.
- Frontend starts all the queues as if from scratch, using the saved vring base in the

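For illustration (not part of the diffed document), the restore ("load") sequence
above corresponds roughly to the new VhostUserFrontend::virtio_restore() code in this
commit; `data_bytes` stands for the previously saved device state:

    // Hand the backend the read end of a pipe to stream its state out of.
    let (r, w) = new_pipe_pair()?;
    let backend_w = backend_client.set_device_state_fd(
        VhostUserTransferDirection::Load,
        VhostUserMigrationPhase::Stopped,
        &r,
    )?;
    {
        // If the backend handed back its own FD, write to that instead. All write
        // ends must be dropped before check_device_state() so the backend sees EOF.
        let mut w = w;
        match backend_w {
            Some(mut backend_w) => backend_w.write_all(&data_bytes)?,
            None => w.write_all(&data_bytes)?,
        }
    }
    backend_client.check_device_state()?;
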
View file

@ -12,6 +12,8 @@ mod worker;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::io::Read;
use std::io::Write;
use anyhow::Context;
use base::error;
@ -23,7 +25,9 @@ use base::WorkerThread;
use serde_json::Value;
use vm_memory::GuestMemory;
use vmm_vhost::message::VhostUserConfigFlags;
use vmm_vhost::message::VhostUserMigrationPhase;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::message::VhostUserTransferDirection;
use vmm_vhost::BackendClient;
use vmm_vhost::VhostUserMemoryRegionInfo;
use vmm_vhost::VringConfigData;
@ -154,7 +158,8 @@ impl VhostUserFrontend {
let mut allow_protocol_features = VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::BACKEND_REQ;
| VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::DEVICE_STATE;
// HACK: the crosvm vhost-user GPU backend supports the non-standard
// VHOST_USER_PROTOCOL_FEATURE_SHARED_MEMORY_REGIONS. This should either be standardized
@ -593,15 +598,100 @@ impl VirtioDevice for VhostUserFrontend {
}
fn virtio_snapshot(&mut self) -> anyhow::Result<Value> {
let snapshot_bytes = self.backend_client.snapshot().map_err(Error::Snapshot)?;
let snapshot_bytes = if self
.protocol_features
.contains(VhostUserProtocolFeatures::DEVICE_STATE)
{
// Send the backend an FD to write the device state to. If it gives us an FD back, then
// we need to read from that instead.
let (mut r, w) = new_pipe_pair()?;
let backend_r = self
.backend_client
.set_device_state_fd(
VhostUserTransferDirection::Save,
VhostUserMigrationPhase::Stopped,
&w,
)
.context("failed to negotiate device state fd")?;
// EOF signals end of the device state bytes, so it is important to close our copy of
// the write FD before we start reading.
std::mem::drop(w);
// Read the device state.
let mut buf = Vec::new();
if let Some(mut backend_r) = backend_r {
backend_r.read_to_end(&mut buf)
} else {
r.read_to_end(&mut buf)
}
.context("failed to read device state")?;
// Call `check_device_state` to ensure the data transfer was successful.
self.backend_client
.check_device_state()
.context("failed to transfer device state")?;
buf
} else {
// TODO: Delete fallback to old style snapshot once non-crosvm users are migrated off.
self.backend_client.snapshot().map_err(Error::Snapshot)?
};
Ok(serde_json::to_value(snapshot_bytes).map_err(Error::SliceToSerdeValue)?)
}
fn virtio_restore(&mut self, data: Value) -> anyhow::Result<()> {
let data_bytes: Vec<u8> = serde_json::from_value(data).map_err(Error::SerdeValueToSlice)?;
self.backend_client
.restore(data_bytes.as_slice())
.map_err(Error::Restore)?;
if self
.protocol_features
.contains(VhostUserProtocolFeatures::DEVICE_STATE)
{
// Send the backend an FD to read the device state from. If it gives us an FD back,
// then we need to write to that instead.
let (r, w) = new_pipe_pair()?;
let backend_w = self
.backend_client
.set_device_state_fd(
VhostUserTransferDirection::Load,
VhostUserMigrationPhase::Stopped,
&r,
)
.context("failed to negotiate device state fd")?;
// Write the device state.
{
// EOF signals the end of the device state bytes, so we need to ensure the write
// objects are dropped before the `check_device_state` call. Done here by moving
// them into this scope.
let backend_w = backend_w;
let mut w = w;
if let Some(mut backend_w) = backend_w {
backend_w.write_all(data_bytes.as_slice())
} else {
w.write_all(data_bytes.as_slice())
}
.context("failed to write device state")?;
}
// Call `check_device_state` to ensure the data transfer was successful.
self.backend_client
.check_device_state()
.context("failed to transfer device state")?;
} else {
// TODO: Delete fallback to old style restore once non-crosvm users are migrated off.
self.backend_client
.restore(data_bytes.as_slice())
.map_err(Error::Restore)?;
}
Ok(())
}
}
#[cfg(unix)]
fn new_pipe_pair() -> anyhow::Result<(impl AsRawDescriptor + Read, impl AsRawDescriptor + Write)> {
base::pipe().context("failed to create pipe")
}
#[cfg(windows)]
fn new_pipe_pair() -> anyhow::Result<(impl AsRawDescriptor + Read, impl AsRawDescriptor + Write)> {
base::named_pipes::pair(
&base::named_pipes::FramingMode::Byte,
&base::named_pipes::BlockingMode::Wait,
/* timeout= */ 0,
)
.context("failed to create named pipes")
}

View file

@ -246,6 +246,62 @@ impl BackendClient {
self.wait_for_ack(&hdr)
}
/// Front-end and back-end negotiate a channel over which to transfer the back-end's internal
/// state during migration.
///
/// Requires VHOST_USER_PROTOCOL_F_DEVICE_STATE to be negotiated.
pub fn set_device_state_fd(
&self,
transfer_direction: VhostUserTransferDirection,
migration_phase: VhostUserMigrationPhase,
fd: &impl AsRawDescriptor,
) -> Result<Option<File>> {
if self.acked_protocol_features & VhostUserProtocolFeatures::DEVICE_STATE.bits() == 0 {
return Err(VhostUserError::InvalidOperation);
}
// Send request.
let req = DeviceStateTransferParameters {
transfer_direction: match transfer_direction {
VhostUserTransferDirection::Save => 0,
VhostUserTransferDirection::Load => 1,
},
migration_phase: match migration_phase {
VhostUserMigrationPhase::Stopped => 0,
},
};
let hdr = self.send_request_with_body(
FrontendReq::SET_DEVICE_STATE_FD,
&req,
Some(&[fd.as_raw_descriptor()]),
)?;
// Receive reply.
let (reply, files) = self.recv_reply_with_files::<VhostUserU64>(&hdr)?;
let has_err = reply.value & 0xff != 0;
let invalid_fd = reply.value & 0x100 != 0;
if has_err {
return Err(VhostUserError::BackendInternalError);
}
match (invalid_fd, files.len()) {
(true, 0) => Ok(None),
(false, 1) => Ok(files.into_iter().next()),
_ => Err(VhostUserError::IncorrectFds),
}
}
/// After transferring the back-end's internal state during migration, check whether the
/// back-end was able to successfully process the entire state.
pub fn check_device_state(&self) -> Result<()> {
if self.acked_protocol_features & VhostUserProtocolFeatures::DEVICE_STATE.bits() == 0 {
return Err(VhostUserError::InvalidOperation);
}
let hdr = self.send_request_header(FrontendReq::CHECK_DEVICE_STATE, None)?;
let reply = self.recv_reply::<VhostUserU64>(&hdr)?;
if reply.value != 0 {
return Err(VhostUserError::BackendInternalError);
}
Ok(())
}
/// Snapshot the device and receive serialized state of the device.
pub fn snapshot(&self) -> Result<Vec<u8>> {
let hdr = self.send_request_header(FrontendReq::SNAPSHOT, None)?;

View file

@ -69,6 +69,13 @@ pub trait Backend {
fn get_max_mem_slots(&mut self) -> Result<u64>;
fn add_mem_region(&mut self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()>;
fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> Result<()>;
fn set_device_state_fd(
&mut self,
transfer_direction: VhostUserTransferDirection,
migration_phase: VhostUserMigrationPhase,
fd: File,
) -> Result<Option<File>>;
fn check_device_state(&mut self) -> Result<()>;
fn get_shared_memory_regions(&mut self) -> Result<Vec<VhostSharedMemoryRegion>>;
fn snapshot(&mut self) -> Result<Vec<u8>>;
fn restore(&mut self, data_bytes: &[u8]) -> Result<()>;
@ -191,6 +198,20 @@ where
self.as_mut().remove_mem_region(region)
}
fn set_device_state_fd(
&mut self,
transfer_direction: VhostUserTransferDirection,
migration_phase: VhostUserMigrationPhase,
fd: File,
) -> Result<Option<File>> {
self.as_mut()
.set_device_state_fd(transfer_direction, migration_phase, fd)
}
fn check_device_state(&mut self) -> Result<()> {
self.as_mut().check_device_state()
}
fn get_shared_memory_regions(&mut self) -> Result<Vec<VhostSharedMemoryRegion>> {
self.as_mut().get_shared_memory_regions()
}
@ -350,6 +371,9 @@ impl<S: Backend> BackendServer<S> {
let buf = self.connection.recv_body_bytes(&hdr)?;
let size = buf.len();
// TODO: The error handling here is inconsistent. Sometimes we report the error to the
// client and keep going, sometimes we report the error and then close the connection,
// sometimes we just close the connection.
match hdr.get_code() {
Ok(FrontendReq::SET_OWNER) => {
self.check_request_size(&hdr, size, 0)?;
@ -585,6 +609,57 @@ impl<S: Backend> BackendServer<S> {
self.send_ack_message(&hdr, res.is_ok())?;
res?;
}
Ok(FrontendReq::SET_DEVICE_STATE_FD) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::DEVICE_STATE.bits()
== 0
{
return Err(Error::InvalidOperation);
}
// Read request.
let msg =
self.extract_request_body::<DeviceStateTransferParameters>(&hdr, size, &buf)?;
let transfer_direction = match msg.transfer_direction {
0 => VhostUserTransferDirection::Save,
1 => VhostUserTransferDirection::Load,
_ => return Err(Error::InvalidMessage),
};
let migration_phase = match msg.migration_phase {
0 => VhostUserMigrationPhase::Stopped,
_ => return Err(Error::InvalidMessage),
};
// Call backend.
let res = self.backend.set_device_state_fd(
transfer_direction,
migration_phase,
files.into_iter().next().ok_or(Error::IncorrectFds)?,
);
// Send response.
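// Reply value encoding, mirroring the decoding in BackendClient::set_device_state_fd:
// a non-zero low byte reports an error; bit 8 is the "invalid FD" flag, meaning no FD
// is returned to the frontend.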
let (msg, fds) = match &res {
Ok(None) => (VhostUserU64::new(0x100), None),
Ok(Some(file)) => (VhostUserU64::new(0), Some(file.as_raw_descriptor())),
// Just in case, set the "invalid FD" flag on error.
Err(_) => (VhostUserU64::new(0x101), None),
};
let reply_hdr: VhostUserMsgHeader<FrontendReq> =
self.new_reply_header::<VhostUserU64>(&hdr, 0)?;
self.connection.send_message(
&reply_hdr,
&msg,
fds.as_ref().map(std::slice::from_ref),
)?;
res?;
}
Ok(FrontendReq::CHECK_DEVICE_STATE) => {
if self.acked_protocol_features & VhostUserProtocolFeatures::DEVICE_STATE.bits()
== 0
{
return Err(Error::InvalidOperation);
}
let res = self.backend.check_device_state();
let msg = VhostUserU64::new(if res.is_ok() { 0 } else { 1 });
self.send_reply_message(&hdr, &msg)?;
res?;
}
Ok(FrontendReq::GET_SHARED_MEMORY_REGIONS) => {
let regions = self.backend.get_shared_memory_regions()?;
let mut buf = Vec::new();
@ -829,7 +904,8 @@ impl<S: Backend> BackendServer<S> {
| Ok(FrontendReq::SET_LOG_FD)
| Ok(FrontendReq::SET_BACKEND_REQ_FD)
| Ok(FrontendReq::SET_INFLIGHT_FD)
| Ok(FrontendReq::ADD_MEM_REG) => Ok(()),
| Ok(FrontendReq::ADD_MEM_REG)
| Ok(FrontendReq::SET_DEVICE_STATE_FD) => Ok(()),
Err(_) => Err(Error::InvalidMessage),
_ if !files.is_empty() => Err(Error::InvalidMessage),
_ => Ok(()),

View file

@ -144,6 +144,12 @@ pub enum FrontendReq {
/// Query the backend for its device status as defined in the VIRTIO
/// specification.
GET_STATUS = 40,
/// Front-end and back-end negotiate a channel over which to transfer the back-end's internal
/// state during migration.
SET_DEVICE_STATE_FD = 42,
/// After transferring the back-end's internal state during migration, check whether the
/// back-end was able to successfully process the entire state.
CHECK_DEVICE_STATE = 43,
// Non-standard message types.
/// Stop all queue handlers and save each queue state.
@ -441,6 +447,8 @@ bitflags! {
const STATUS = 0x0001_0000;
/// Support Xen mmap.
const XEN_MMAP = 0x0002_0000;
/// Support VHOST_USER_SET_DEVICE_STATE_FD and VHOST_USER_CHECK_DEVICE_STATE messages.
const DEVICE_STATE = 0x0008_0000;
/// Support shared memory regions. (Non-standard.)
const SHARED_MEMORY_REGIONS = 0x8000_0000;
}
@ -818,6 +826,23 @@ impl VhostUserMsgValidator for VhostUserInflight {
}
}
/// VHOST_USER_SET_DEVICE_STATE_FD request payload.
#[repr(C)]
#[derive(Default, Clone, Copy, AsBytes, FromZeroes, FromBytes)]
pub struct DeviceStateTransferParameters {
/// Direction in which the state is transferred (0 = save, 1 = load).
pub transfer_direction: u32,
/// Migration phase during which the transfer takes place (0 = stopped).
pub migration_phase: u32,
}
impl VhostUserMsgValidator for DeviceStateTransferParameters {
fn is_valid(&self) -> bool {
// Validated elsewhere.
true
}
}
/*
* TODO: support dirty log, live migration and IOTLB operations.
#[repr(C, packed)]
@ -1210,6 +1235,17 @@ impl VhostSharedMemoryRegion {
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserTransferDirection {
Save,
Load,
}
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserMigrationPhase {
Stopped,
}
#[cfg(test)]
mod tests {
use super::*;

View file

@ -264,6 +264,19 @@ impl Backend for TestBackend {
Ok(())
}
fn set_device_state_fd(
&mut self,
_transfer_direction: VhostUserTransferDirection,
_migration_phase: VhostUserMigrationPhase,
_fd: File,
) -> Result<Option<File>> {
Ok(None)
}
fn check_device_state(&mut self) -> Result<()> {
Ok(())
}
fn get_shared_memory_regions(&mut self) -> Result<Vec<VhostSharedMemoryRegion>> {
Ok(Vec::new())
}