Mirror of https://chromium.googlesource.com/crosvm/crosvm
Synced 2024-11-24 12:34:31 +00:00

commit c28067d1d9 (parent 9c0d3e16e7)

Reformat comments

Test: presubmit
Change-Id: I39c261d9985989873b698213c5d8b653fc13757b
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5299850
Auto-Submit: Kaiyi Li <kaiyili@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>

170 changed files with 835 additions and 749 deletions
@@ -453,8 +453,8 @@ pub trait LinuxArch {
 /// # Arguments
 ///
 /// * `components` - Parts to use to build the VM.
-/// * `vm_evt_wrtube` - Tube used by sub-devices to request that crosvm exit because guest
-/// wants to stop/shut down or requested reset.
+/// * `vm_evt_wrtube` - Tube used by sub-devices to request that crosvm exit because guest wants
+/// to stop/shut down or requested reset.
 /// * `system_allocator` - Allocator created by this trait's implementation of
 /// `get_system_allocator_config`.
 /// * `serial_parameters` - Definitions for how the serial devices should be configured.
@@ -1288,8 +1288,8 @@ where
 /// * `image` - The file containing the image to be loaded.
 /// * `min_guest_addr` - The minimum address of the start of the image.
 /// * `max_guest_addr` - The address to load the last byte of the image.
-/// * `align` - The minimum alignment of the start address of the image in bytes
-/// (must be a power of two).
+/// * `align` - The minimum alignment of the start address of the image in bytes (must be a power of
+/// two).
 ///
 /// The guest address and size in bytes of the loaded image are returned.
 pub fn load_image_high<F>(
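The `align` parameter documented above must be a power of two. A minimal, generic sketch of the usual power-of-two alignment arithmetic (illustrative only, not crosvm's actual implementation):

    fn align_up(addr: u64, align: u64) -> u64 {
        // Only valid when `align` is a power of two, as the doc comment requires.
        debug_assert!(align.is_power_of_two());
        (addr + align - 1) & !(align - 1)
    }

    fn align_down(addr: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        addr & !(align - 1)
    }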
@@ -94,8 +94,8 @@ pub const SERIAL_ADDR: [u64; 4] = [0x3f8, 0x2f8, 0x3e8, 0x2e8];
 /// * `com_evt_1_3` - event for com1 and com3
 /// * `com_evt_1_4` - event for com2 and com4
 /// * `serial_parameters` - definitions of serial parameter configurations.
-/// * `serial_jail` - minijail object cloned for use with each serial device.
-/// All four of the traditional PC-style serial ports (COM1-COM4) must be specified.
+/// * `serial_jail` - minijail object cloned for use with each serial device. All four of the
+/// traditional PC-style serial ports (COM1-COM4) must be specified.
 pub fn add_serial_devices(
 protection_type: ProtectionType,
 io_bus: &Bus,
@@ -179,7 +179,8 @@ mod tests {
 let layout = Layout::from_size_align(size_of::<u32>() * 15, align_of::<u32>()).unwrap();
 let allocation = LayoutAllocation::zeroed(layout);
 // SAFETY:
-// Slice less than the allocation size, which will return a slice of only the requested length.
+// Slice less than the allocation size, which will return a slice of only the requested
+// length.
 let slice: &[u32] = unsafe { allocation.as_slice(15) };
 assert_eq!(slice.len(), 15);
 assert_eq!(slice[0], 0);
@@ -192,7 +193,8 @@ mod tests {
 let allocation = LayoutAllocation::zeroed(layout);

 // SAFETY:
-// Slice less than the allocation size, which will return a slice of only the requested length.
+// Slice less than the allocation size, which will return a slice of only the requested
+// length.
 let slice: &[u32] = unsafe { allocation.as_slice(5) };
 assert_eq!(slice.len(), 5);
 }
@@ -203,7 +205,8 @@ mod tests {
 let allocation = LayoutAllocation::zeroed(layout);

 // SAFETY:
-// Slice more than the allocation size, which will clamp the returned slice len to the limit.
+// Slice more than the allocation size, which will clamp the returned slice len to the
+// limit.
 let slice: &[u32] = unsafe { allocation.as_slice(100) };
 assert_eq!(slice.len(), 15);
 }
@@ -27,8 +27,8 @@ use crate::Result;
 /// - Uses eventfd on Linux.
 /// - Uses synchapi event objects on Windows.
 /// - The `Event` and `WaitContext` APIs together cannot easily be implemented with the same
-/// semantics on all platforms. In particular, it is difficult to support multiple readers, so only
-/// a single reader is allowed for now. Multiple readers will result in undefined behavior.
+/// semantics on all platforms. In particular, it is difficult to support multiple readers, so
+/// only a single reader is allowed for now. Multiple readers will result in undefined behavior.
 #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(transparent)]
 pub struct Event(pub(crate) PlatformEvent);
@@ -172,7 +172,8 @@ pub trait FileReadWriteAtVolatile {
 /// method must behave as a single call to `read_at_volatile` with the buffers concatenated
 /// would. The default implementation calls `read_at_volatile` with either the first nonempty
 /// buffer provided, or returns `Ok(0)` if none exists.
-/// On Windows file pointer will update with the read, but on Linux the file pointer will not change.
+/// On Windows file pointer will update with the read, but on Linux the file pointer will not
+/// change.
 fn read_vectored_at_volatile(&mut self, bufs: &[VolatileSlice], offset: u64) -> Result<usize> {
 if let Some(&slice) = bufs.first() {
 self.read_at_volatile(slice, offset)
@@ -182,8 +183,8 @@ pub trait FileReadWriteAtVolatile {
 }

 /// Reads bytes from this file at `offset` into the given slice until all bytes in the slice are
-/// read, or an error is returned. On Windows file pointer will update with the read, but on Linux the
-/// file pointer will not change.
+/// read, or an error is returned. On Windows file pointer will update with the read, but on
+/// Linux the file pointer will not change.
 fn read_exact_at_volatile(&mut self, mut slice: VolatileSlice, mut offset: u64) -> Result<()> {
 while slice.size() > 0 {
 match self.read_at_volatile(slice, offset) {
@@ -209,7 +210,8 @@ pub trait FileReadWriteAtVolatile {
 /// consumed. This method must behave as a call to `write_at_volatile` with the buffers
 /// concatenated would. The default implementation calls `write_at_volatile` with either the
 /// first nonempty buffer provided, or returns `Ok(0)` if none exists.
-/// On Windows file pointer will update with the write, but on Linux the file pointer will not change.
+/// On Windows file pointer will update with the write, but on Linux the file pointer will not
+/// change.
 fn write_vectored_at_volatile(&mut self, bufs: &[VolatileSlice], offset: u64) -> Result<usize> {
 if let Some(&slice) = bufs.first() {
 self.write_at_volatile(slice, offset)
@@ -64,8 +64,8 @@ impl PlatformEvent {
 }
 Ok(PlatformEvent {
 // SAFETY:
-// This is safe because we checked ret for success and know the kernel gave us an fd that we
-// own.
+// This is safe because we checked ret for success and know the kernel gave us an fd
+// that we own.
 event_handle: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
 })
 }
@@ -575,7 +575,6 @@ pub fn max_open_files() -> Result<u64> {
 }

 /// Moves the requested PID/TID to a particular cgroup
-///
 pub fn move_to_cgroup(cgroup_path: PathBuf, id_to_write: Pid, cgroup_file: &str) -> Result<()> {
 use std::io::Write;

@@ -83,8 +83,8 @@ pub(in crate::sys) fn sockaddr_un<P: AsRef<Path>>(

 // Check if the input path is valid. Since
 // * The pathname in sun_path should be null-terminated.
-// * The length of the pathname, including the terminating null byte,
-// should not exceed the size of sun_path.
+// * The length of the pathname, including the terminating null byte, should not exceed the size
+// of sun_path.
 //
 // and our input is a `Path`, we only need to check
 // * If the string size of `Path` should less than sizeof(sun_path)
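A standalone sketch of the length check this comment describes, assuming the usual 108-byte `sun_path` field of `libc::sockaddr_un` on Linux; the helper name is hypothetical and the real function also builds the address structure:

    use std::os::unix::ffi::OsStrExt;
    use std::path::Path;

    // Hypothetical helper: true if `path` plus its terminating NUL fits in sun_path.
    fn fits_in_sun_path(path: &Path, sun_path_len: usize) -> bool {
        path.as_os_str().as_bytes().len() + 1 <= sun_path_len
    }

    // Example: fits_in_sun_path(Path::new("/tmp/crosvm.sock"), 108)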
@@ -366,9 +366,14 @@ fn parse_ctrl_group_name_and_id(
 ///
 /// # Arguments
 ///
-/// * `nl_attr_area` - Nested attributes area (CTRL_ATTR_MCAST_GROUPS data), where nl_attr's
-/// corresponding to specific groups are embed
-/// * `group_name` - String with group_name for which we are looking group_id
+/// * `nl_attr_area`
+///
+/// Nested attributes area (CTRL_ATTR_MCAST_GROUPS data), where nl_attr's corresponding to
+/// specific groups are embed
+///
+/// * `group_name`
+///
+/// String with group_name for which we are looking group_id
 ///
 /// the CTRL_ATTR_MCAST_GROUPS data has nested attributes. Each of nested attribute is per
 /// multicast group attributes, which have another nested attributes: CTRL_ATTR_MCAST_GRP_NAME and
@@ -473,7 +478,6 @@ impl NetlinkGenericRead {
 /// ...
 /// }
 /// }
-///
 pub fn get_multicast_group_id(&self, group_name: String) -> Option<u32> {
 for netlink_msg in self.iter() {
 debug_pr!(
@@ -132,8 +132,8 @@ pub(in crate::sys) fn sockaddr_un<P: AsRef<Path>>(

 // Check if the input path is valid. Since
 // * The pathname in sun_path should be null-terminated.
-// * The length of the pathname, including the terminating null byte,
-// should not exceed the size of sun_path.
+// * The length of the pathname, including the terminating null byte, should not exceed the size
+// of sun_path.
 //
 // and our input is a `Path`, we only need to check
 // * If the string size of `Path` should less than sizeof(sun_path)
@@ -13,8 +13,8 @@ use std::os::unix::net::UnixStream;
 use crate::FileReadWriteAtVolatile;
 use crate::FileReadWriteVolatile;

-// This module allows the below macros to refer to $crate::unix::file_traits::lib::X and ensures other
-// crates don't need to add additional crates to their Cargo.toml.
+// This module allows the below macros to refer to $crate::unix::file_traits::lib::X and ensures
+// other crates don't need to add additional crates to their Cargo.toml.
 pub mod lib {
 pub use libc::c_int;
 pub use libc::c_void;
@@ -33,8 +33,10 @@ impl<T> InterruptibleResult for io::Result<T> {
 ///
 /// The given expression `$x` can return
 ///
-/// * `crate::linux::Result` in which case the expression is retried if the `Error::errno()` is `EINTR`.
-/// * `std::io::Result` in which case the expression is retried if the `ErrorKind` is `ErrorKind::Interrupted`.
+/// * `crate::linux::Result` in which case the expression is retried if the `Error::errno()` is
+/// `EINTR`.
+/// * `std::io::Result` in which case the expression is retried if the `ErrorKind` is
+/// `ErrorKind::Interrupted`.
 ///
 /// Note that if expression returns i32 (i.e. either -1 or error code), then handle_eintr_errno()
 /// or handle_eintr_rc() should be used instead.
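The retry behaviour documented in this hunk can be illustrated with a plain loop; this is the generic pattern the macro wraps, not the macro's actual expansion:

    use std::io;

    fn retry_on_eintr<T>(mut op: impl FnMut() -> io::Result<T>) -> io::Result<T> {
        loop {
            match op() {
                // A syscall interrupted by a signal is not a real failure; try again.
                Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
                other => return other,
            }
        }
    }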
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-//! The mmap module provides a safe interface to map memory and ensures UnmapViewOfFile is called when the
-//! mmap object leaves scope.
+//! The mmap module provides a safe interface to map memory and ensures UnmapViewOfFile is called
+//! when the mmap object leaves scope.

 use libc::c_void;
 use win_util::get_high_order;
@@ -68,14 +68,14 @@ use crate::WaitContext;
 ///
 /// The general rule is this should be *at least* as big as the largest message, otherwise
 /// unexpected blocking behavior can result; for example, if too small, this can interact badly with
-/// crate::windows::StreamChannel, which expects to be able to make a complete write before releasing
-/// a lock that the opposite side needs to complete a read. This means that if the buffer is too
-/// small:
+/// crate::windows::StreamChannel, which expects to be able to make a complete write before
+/// releasing a lock that the opposite side needs to complete a read. This means that if the buffer
+/// is too small:
 /// * The writer can't complete its write and release the lock because the buffer is too small.
-/// * The reader can't start reading because the lock is held by the writer, so it can't
-/// relieve buffer pressure. Note that for message pipes, the reader couldn't do anything
-/// to help anyway, because a message mode pipe should NOT have a partial read (which is
-/// what we would need to relieve pressure).
+/// * The reader can't start reading because the lock is held by the writer, so it can't relieve
+/// buffer pressure. Note that for message pipes, the reader couldn't do anything to help
+/// anyway, because a message mode pipe should NOT have a partial read (which is what we would
+/// need to relieve pressure).
 /// * Conditions for deadlock are met, and both the reader & writer enter circular waiting.
 pub const DEFAULT_BUFFER_SIZE: usize = 50 * 1024;

@@ -294,17 +294,15 @@ pub fn pair(
 /// # Arguments
 ///
 /// * `framing_mode` - Whether the system should provide a simple byte stream (Byte) or an
-/// automatically framed sequence of messages (Message). In message mode it's an
-/// error to read fewer bytes than were sent in a message from the other end of
-/// the pipe.
+/// automatically framed sequence of messages (Message). In message mode it's an error to read
+/// fewer bytes than were sent in a message from the other end of the pipe.
 /// * `blocking_mode` - Whether the system should wait on read() until data is available (Wait) or
 /// return immediately if there is nothing available (NoWait).
-/// * `timeout` - A timeout to apply for socket operations, in milliseconds.
-/// Setting this to zero will create sockets with the system
-/// default timeout.
+/// * `timeout` - A timeout to apply for socket operations, in milliseconds. Setting this to
+/// zero will create sockets with the system default timeout.
 /// * `buffer_size` - The default buffer size for the named pipe. The system should expand the
-/// buffer automatically as needed, except in the case of NOWAIT pipes, where
-/// it will just fail writes that don't fit in the buffer.
+/// buffer automatically as needed, except in the case of NOWAIT pipes, where it will just fail
+/// writes that don't fit in the buffer.
 /// # Return value
 ///
 /// Returns a pair of pipes, of the form (server, client). Note that for some winapis, such as
@@ -356,17 +354,15 @@ pub fn pair_with_buffer_size(
 /// * `pipe_name` - The path of the named pipe to create. Should be in the form
 /// `\\.\pipe\<some-name>`.
 /// * `framing_mode` - Whether the system should provide a simple byte stream (Byte) or an
-/// automatically framed sequence of messages (Message). In message mode it's an
-/// error to read fewer bytes than were sent in a message from the other end of
-/// the pipe.
+/// automatically framed sequence of messages (Message). In message mode it's an error to read
+/// fewer bytes than were sent in a message from the other end of the pipe.
 /// * `blocking_mode` - Whether the system should wait on read() until data is available (Wait) or
 /// return immediately if there is nothing available (NoWait).
-/// * `timeout` - A timeout to apply for socket operations, in milliseconds.
-/// Setting this to zero will create sockets with the system
-/// default timeout.
+/// * `timeout` - A timeout to apply for socket operations, in milliseconds. Setting this to
+/// zero will create sockets with the system default timeout.
 /// * `buffer_size` - The default buffer size for the named pipe. The system should expand the
-/// buffer automatically as needed, except in the case of NOWAIT pipes, where
-/// it will just fail writes that don't fit in the buffer.
+/// buffer automatically as needed, except in the case of NOWAIT pipes, where it will just fail
+/// writes that don't fit in the buffer.
 /// * `overlapped` - Sets whether overlapped mode is set on the pipe.
 pub fn create_server_pipe(
 pipe_name: &str,
@@ -435,9 +431,8 @@ pub fn create_server_pipe(
 /// * `pipe_name` - The path of the named pipe to create. Should be in the form
 /// `\\.\pipe\<some-name>`.
 /// * `framing_mode` - Whether the system should provide a simple byte stream (Byte) or an
-/// automatically framed sequence of messages (Message). In message mode it's an
-/// error to read fewer bytes than were sent in a message from the other end of
-/// the pipe.
+/// automatically framed sequence of messages (Message). In message mode it's an error to read
+/// fewer bytes than were sent in a message from the other end of the pipe.
 /// * `blocking_mode` - Whether the system should wait on read() until data is available (Wait) or
 /// return immediately if there is nothing available (NoWait).
 /// * `overlapped` - Sets whether the pipe is opened in overlapped mode.
@@ -711,9 +706,9 @@ impl PipeConnection {
 /// (can be created with `OverlappedWrapper::new`) will be passed into
 /// `WriteFile`. That event will be triggered when the write operation is complete.
 ///
-/// In order to get how many bytes were written, call `get_overlapped_result`. That function will
-/// also help with waiting until the write operation is complete. The pipe must be opened in
-/// overlapped otherwise there may be unexpected behavior.
+/// In order to get how many bytes were written, call `get_overlapped_result`. That function
+/// will also help with waiting until the write operation is complete. The pipe must be
+/// opened in overlapped otherwise there may be unexpected behavior.
 ///
 /// # Safety
 /// * buf & overlapped_wrapper MUST live until the overlapped operation is complete.
@@ -1070,8 +1065,8 @@ pub struct NamedPipeInfo {
 /// we ensure that the variable size message is written/read right after writing/reading
 /// fixed size header. For example it avoid sending or receiving in messages in order like
 /// H1, H2, M1, M2
-/// - where header H1 and its message M1 are sent by one event loop and H2 and its
-/// message M2 are sent by another event loop.
+/// - where header H1 and its message M1 are sent by one event loop and H2 and its message M2 are
+/// sent by another event loop.
 ///
 /// Do not expose direct access to reader or writer pipes.
 ///
@@ -156,10 +156,12 @@ pub fn set_time_period(res: Duration, begin: bool) -> Result<()> {
 }

 let ret = if begin {
-// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range.
+// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's
+// range.
 unsafe { timeBeginPeriod(res.as_millis() as u32) }
 } else {
-// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range.
+// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's
+// range.
 unsafe { timeEndPeriod(res.as_millis() as u32) }
 };
 if ret != TIMERR_NOERROR {
@@ -21,7 +21,8 @@ pub fn set_audio_thread_priority() -> Result<SafeMultimediaHandle> {
 let multimedia_handle = unsafe {
 let mut task_index: u32 = 0;
 // "Pro Audio" is defined in:
-// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Multimedia\SystemProfile\Tasks\Pro Audio
+// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows
+// NT\CurrentVersion\Multimedia\SystemProfile\Tasks\Pro Audio
 let pro_audio = std::ffi::CString::new("Pro Audio").unwrap();
 AvSetMmThreadCharacteristicsA(pro_audio.as_ptr(), &mut task_index)
 };
@@ -58,8 +59,8 @@ impl Drop for SafeMultimediaHandle {
 fn drop(&mut self) {
 // SAFETY:
 // Safe because we `multimedia_handle` is defined in the same thread and is created in the
-// function above. `multimedia_handle` needs be created from `AvSetMmThreadCharacteristicsA`.
-// This will also drop the `mulitmedia_handle`.
+// function above. `multimedia_handle` needs be created from
+// `AvSetMmThreadCharacteristicsA`. This will also drop the `mulitmedia_handle`.
 if unsafe { AvRevertMmThreadCharacteristics(self.multimedia_handle) } == FALSE {
 warn!(
 "Failed to revert audio thread. Error: {}",
@@ -219,8 +219,8 @@ impl StreamChannel {
 // the notifier though, then we have to be sure, so we'll proceed to the next section.
 let byte_count = self.get_readable_byte_count()?;
 if byte_count > 0 {
-// It's always safe to set the read notifier here because we know there is data in the
-// pipe, and no one else could read it out from under us.
+// It's always safe to set the read notifier here because we know there is data in
+// the pipe, and no one else could read it out from under us.
 self.read_notify.signal().map_err(|e| {
 io::Error::new(
 io::ErrorKind::Other,
@@ -45,7 +45,8 @@ pub unsafe trait Terminal {
 & !(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT);

 // SAFETY:
-// Safe because the syscall will only read the extent of mode and we check the return result.
+// Safe because the syscall will only read the extent of mode and we check the return
+// result.
 if unsafe { SetConsoleMode(descriptor, new_mode) } == 0 {
 return Err(Error::last());
 }
@@ -56,7 +57,8 @@ pub unsafe trait Terminal {
 /// Set this terminal's mode to a previous state returned by `set_raw_mode()`.
 fn restore_mode(&self, mode: DWORD) -> Result<()> {
 // SAFETY:
-// Safe because the syscall will only read the extent of mode and we check the return result.
+// Safe because the syscall will only read the extent of mode and we check the return
+// result.
 if unsafe { SetConsoleMode(self.terminal_descriptor(), mode) } == 0 {
 Err(Error::last())
 } else {
@@ -275,9 +275,9 @@ fn duplicate_handle(desc: RawHandle, target_pid: Option<u32>) -> Result<RawHandl
 }

 /// Reads a part of a Tube packet asserting that it was correctly read. This means:
-/// * Treats partial "message" (transport framing) reads are Ok, as long as we filled our buffer.
-/// We use this to ignore errors when reading the message header, which has the lengths we need
-/// to allocate our buffers for the remainder of the message.
+/// * Treats partial "message" (transport framing) reads are Ok, as long as we filled our buffer. We
+/// use this to ignore errors when reading the message header, which has the lengths we need to
+/// allocate our buffers for the remainder of the message.
 /// * We filled the supplied buffer.
 fn perform_read<F: FnMut(&mut [u8]) -> io::Result<usize>>(
 read_fn: &mut F,
@@ -215,24 +215,26 @@ impl<T: EventToken> EventContext<T> {
 WAIT_OBJECT_0..=MAXIMUM_WAIT_OBJECTS_U32 => {
 let mut event_index = (result - WAIT_OBJECT_0) as usize;
 if event_index >= handles_len {
-// This is not a valid index and should return an error. This case should not be possible
-// and will likely not return a meaningful system error code, but is still an invalid case.
+// This is not a valid index and should return an error. This case should not be
+// possible and will likely not return a meaningful system
+// error code, but is still an invalid case.
 error!("Wait returned index out of range");
 return errno_result();
 }
 if event_index == 0 {
-// The handles list has been modified and triggered the wait, try again with the updated
-// handles list. Note it is possible the list was modified again after the wait which will
-// trigger the handles_modified_event again, but that will only err towards the safe side
+// The handles list has been modified and triggered the wait, try again with the
+// updated handles list. Note it is possible the list was
+// modified again after the wait which will trigger the
+// handles_modified_event again, but that will only err towards the safe side
 // of recursing an extra time.
 let _ = self.handles_modified_event.wait();
 return self.wait_timeout(timeout);
 }

 let mut events_to_return = SmallVec::<[TriggeredEvent<T>; 16]>::new();
-// Multiple events may be triggered at once, but WaitForMultipleObjects will only return one.
-// Once it returns, loop through the remaining triggers checking each to ensure they haven't
-// also been triggered.
+// Multiple events may be triggered at once, but WaitForMultipleObjects will only
+// return one. Once it returns, loop through the remaining triggers
+// checking each to ensure they haven't also been triggered.
 let mut handles_offset: usize = 0;
 loop {
 let event_to_return = raw_handles_list[event_index + handles_offset];
@@ -270,8 +272,9 @@ impl<T: EventToken> EventContext<T> {
 ) as usize;

 if event_index >= (handles_len - handles_offset) {
-// This indicates a failure condition, as return values greater than the length
-// of the provided array are reserved for failures.
+// This indicates a failure condition, as return values greater than the
+// length of the provided array are reserved for
+// failures.
 break;
 }
 }
@@ -42,8 +42,6 @@
 //!
 //! init_with(cfg).unwrap();
 //! error!("something went horribly wrong: {}", "out of RAMs");
-//!
-//!
 //! ```
 //!
 //!
@@ -116,7 +116,6 @@
 //! self.0 << 4
 //! }
 //! }
-//!
 //! ```
 //!
 //! Finally, fields may be of user-defined enum types. The enum must satisfy one of the following
@@ -183,8 +183,8 @@ pub trait ShmStreamSource<E: std::error::Error>: Send {
 /// Creates a new [`ShmStream`](ShmStream)
 ///
 /// Creates a new `ShmStream` object, which allows:
-/// * Waiting until the server has communicated that data is ready or
-/// requested that we make more data available.
+/// * Waiting until the server has communicated that data is ready or requested that we make
+/// more data available.
 /// * Setting the location and length of buffers for reading/writing audio data.
 ///
 /// # Arguments
@@ -193,15 +193,13 @@ pub trait ShmStreamSource<E: std::error::Error>: Send {
 /// * `num_channels` - The number of audio channels for the stream.
 /// * `format` - The audio format to use for audio samples.
 /// * `frame_rate` - The stream's frame rate in Hz.
-/// * `buffer_size` - The maximum size of an audio buffer. This will be the
-/// size used for transfers of audio data between client
-/// and server.
+/// * `buffer_size` - The maximum size of an audio buffer. This will be the size used for
+/// transfers of audio data between client and server.
 /// * `effects` - Audio effects to use for the stream, such as echo-cancellation.
 /// * `client_shm` - The shared memory area that will contain samples.
-/// * `buffer_offsets` - The two initial values to use as buffer offsets
-/// for streams. This way, the server will not write
-/// audio data to an arbitrary offset in `client_shm`
-/// if the client fails to update offsets in time.
+/// * `buffer_offsets` - The two initial values to use as buffer offsets for streams. This way,
+/// the server will not write audio data to an arbitrary offset in `client_shm` if the client
+/// fails to update offsets in time.
 ///
 /// # Errors
 ///
@@ -10,16 +10,15 @@
 //! return a PoisonError. This API codifies our error handling strategy around
 //! poisoned mutexes in crosvm.
 //!
-//! - Crosvm releases are built with panic=abort so poisoning never occurs. A
-//! panic while a mutex is held (or ever) takes down the entire process. Thus
-//! we would like for code not to have to consider the possibility of poison.
+//! - Crosvm releases are built with panic=abort so poisoning never occurs. A panic while a mutex is
+//! held (or ever) takes down the entire process. Thus we would like for code not to have to
+//! consider the possibility of poison.
 //!
-//! - We could ask developers to always write `.lock().unwrap()` on a standard
-//! library mutex. However, we would like to stigmatize the use of unwrap. It
-//! is confusing to permit unwrap but only on mutex lock results. During code
-//! review it may not always be obvious whether a particular unwrap is
-//! unwrapping a mutex lock result or a different error that should be handled
-//! in a more principled way.
+//! - We could ask developers to always write `.lock().unwrap()` on a standard library mutex.
+//! However, we would like to stigmatize the use of unwrap. It is confusing to permit unwrap but
+//! only on mutex lock results. During code review it may not always be obvious whether a
+//! particular unwrap is unwrapping a mutex lock result or a different error that should be
+//! handled in a more principled way.
 //!
 //! Developers should feel free to use sync::Mutex anywhere in crosvm that they
 //! would otherwise be using std::sync::Mutex.
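A minimal sketch of the kind of non-poisoning wrapper this module comment describes; illustrative only, since crosvm's `sync::Mutex` has more surface area than shown here:

    use std::sync::{Mutex as StdMutex, MutexGuard};

    pub struct Mutex<T>(StdMutex<T>);

    impl<T> Mutex<T> {
        pub fn new(value: T) -> Self {
            Mutex(StdMutex::new(value))
        }

        pub fn lock(&self) -> MutexGuard<'_, T> {
            // Poisoning cannot be observed under panic=abort, so unwrap here
            // instead of forcing every call site to handle PoisonError.
            self.0.lock().unwrap()
        }
    }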
@@ -22,8 +22,7 @@ use crate::BlockingPool;
 /// This is convenient, though not preferred. Pros/cons:
 /// + It avoids passing executor all the way to each call sites.
 /// + The call site can assume that executor will never shutdown.
-/// + Provides similar functionality as async_task with a few improvements
-/// around ability to cancel.
+/// + Provides similar functionality as async_task with a few improvements around ability to cancel.
 /// - Globals are harder to reason about.
 static EXECUTOR: Lazy<CancellableBlockingPool> =
 Lazy::new(|| CancellableBlockingPool::new(256, Duration::from_secs(10)));
@@ -279,7 +278,6 @@ impl CancellableBlockingPool {
 /// This will block until all work that has been started by the worker threads is finished. Any
 /// work that was added to the `CancellableBlockingPool` but not yet picked up by a worker
 /// thread will not complete and `await`ing on the `Task` for that work will panic.
-///
 pub fn shutdown(&self) -> Result<(), Error> {
 self.shutdown_with_timeout(DEFAULT_SHUTDOWN_TIMEOUT)
 }
@@ -370,8 +370,8 @@ impl Condvar {

 let set_on_release = if waiters.is_empty() {
 // SAFETY:
-// Clear the rwlock associated with this Condvar since there are no longer any waiters. Safe
-// because we the spin lock guarantees exclusive access.
+// Clear the rwlock associated with this Condvar since there are no longer any waiters.
+// Safe because we the spin lock guarantees exclusive access.
 unsafe { *self.mu.get() = 0 };

 0
@@ -102,8 +102,8 @@ enum OpStatus {
 WakeEvent,
 }

-// An IO source previously registered with an EpollReactor. Used to initiate asynchronous IO with the
-// associated executor.
+// An IO source previously registered with an EpollReactor. Used to initiate asynchronous IO with
+// the associated executor.
 pub struct RegisteredSource<F> {
 pub(crate) source: F,
 ex: Weak<RawExecutor<EpollReactor>>,
@@ -159,8 +159,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
 loop {
 let res = if let Some(offset) = file_offset {
 // SAFETY:
-// Safe because we trust the kernel not to write path the length given and the length is
-// guaranteed to be valid from the pointer by io_slice_mut.
+// Safe because we trust the kernel not to write path the length given and the
+// length is guaranteed to be valid from the pointer by
+// io_slice_mut.
 unsafe {
 libc::preadv64(
 self.registered_source.duped_fd.as_raw_fd(),
@@ -171,8 +172,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
 }
 } else {
 // SAFETY:
-// Safe because we trust the kernel not to write path the length given and the length is
-// guaranteed to be valid from the pointer by io_slice_mut.
+// Safe because we trust the kernel not to write path the length given and the
+// length is guaranteed to be valid from the pointer by
+// io_slice_mut.
 unsafe {
 libc::readv(
 self.registered_source.duped_fd.as_raw_fd(),
@@ -272,8 +274,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
 loop {
 let res = if let Some(offset) = file_offset {
 // SAFETY:
-// Safe because we trust the kernel not to write path the length given and the length is
-// guaranteed to be valid from the pointer by io_slice_mut.
+// Safe because we trust the kernel not to write path the length given and the
+// length is guaranteed to be valid from the pointer by
+// io_slice_mut.
 unsafe {
 libc::pwritev64(
 self.registered_source.duped_fd.as_raw_fd(),
@@ -284,8 +287,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
 }
 } else {
 // SAFETY:
-// Safe because we trust the kernel not to write path the length given and the length is
-// guaranteed to be valid from the pointer by io_slice_mut.
+// Safe because we trust the kernel not to write path the length given and the
+// length is guaranteed to be valid from the pointer by
+// io_slice_mut.
 unsafe {
 libc::writev(
 self.registered_source.duped_fd.as_raw_fd(),
@@ -30,8 +30,8 @@
 //! What if the kernel's reference to the buffer outlives the buffer itself? This could happen if a
 //! read operation was submitted, then the memory is dropped. To solve this, the executor takes an
 //! Arc to the backing memory. Vecs being read to are also wrapped in an Arc before being passed to
-//! the executor. The executor holds the Arc and ensures all operations are complete before dropping
-//! it, that guarantees the memory is valid for the duration.
+//! the executor. The executor holds the Arc and ensures all operations are complete before
+//! dropping it, that guarantees the memory is valid for the duration.
 //!
 //! The buffers _have_ to be on the heap. Because we don't have a way to cancel a future if it is
 //! dropped(can't rely on drop running), there is no way to ensure the kernel's buffer remains valid
@@ -936,8 +936,8 @@ mod tests {
 .register_source(&ex, &rx)
 .expect("register source failed");

-// Submit the op to the kernel. Next, test that the source keeps its Arc open for the duration
-// of the op.
+// Submit the op to the kernel. Next, test that the source keeps its Arc open for the
+// duration of the op.
 let pending_op = registered_source
 .start_read_to_mem(None, Arc::clone(&bm), [MemRegion { offset: 0, len: 8 }])
 .expect("failed to start read to mem");
@@ -983,8 +983,8 @@ mod tests {
 .register_source(&ex, &tx)
 .expect("register source failed");

-// Submit the op to the kernel. Next, test that the source keeps its Arc open for the duration
-// of the op.
+// Submit the op to the kernel. Next, test that the source keeps its Arc open for the
+// duration of the op.
 let pending_op = registered_source
 .start_write_from_mem(None, Arc::clone(&bm), [MemRegion { offset: 0, len: 8 }])
 .expect("failed to start write to mem");
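A conceptual sketch of the ownership rule these comments describe: the executor clones the Arc for a submitted operation so the backing buffer outlives the caller's handle. Type and function names here are illustrative, not the real cros_async API:

    use std::sync::Arc;

    struct PendingOp {
        // Held until the kernel reports completion, keeping the memory valid.
        backing: Arc<Vec<u8>>,
    }

    fn submit_read(buf: &Arc<Vec<u8>>) -> PendingOp {
        PendingOp { backing: Arc::clone(buf) }
    }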
@@ -39,11 +39,12 @@ impl EventAsync {
 Self::new_without_reset(
 // SAFETY:
 // Safe because:
-// a) the underlying Event should be validated by the caller.
-// b) we do NOT take ownership of the underlying Event. If we did that would cause an early
-// free (and later a double free @ the end of this scope). This is why we have to wrap
-// it in ManuallyDrop.
-// c) we own the clone that is produced exclusively, so it is safe to take ownership of it.
+// * the underlying Event should be validated by the caller.
+// * we do NOT take ownership of the underlying Event. If we did that would cause an
+// early free (and later a double free @ the end of this scope). This is why we have
+// to wrap it in ManuallyDrop.
+// * we own the clone that is produced exclusively, so it is safe to take ownership of
+// it.
 unsafe {
 ManuallyDrop::new(Event::from_raw_descriptor(descriptor.as_raw_descriptor()))
 }
@@ -259,8 +259,8 @@ impl WeakWake for HandleReactor {
 /// 1. The reactor in use is a HandleReactor.
 /// 2. Immediately after the IO syscall, this future MUST be awaited. We rely on the fact that
 /// the executor cannot poll the IOCP before this future is polled for the first time to
-/// ensure the waker has been registered. (If the executor polls the IOCP before the waker
-/// is registered, the future will stall.)
+/// ensure the waker has been registered. (If the executor polls the IOCP before the waker is
+/// registered, the future will stall.)
 pub(crate) struct OverlappedOperation {
 overlapped: BoxedOverlapped,
 ex: Weak<RawExecutor<HandleReactor>>,
@@ -331,8 +331,8 @@ impl<F: AsRawDescriptor> HandleSource<F> {
 .spawn(
 move || {
 let mut file = get_thread_file(descriptors);
-// ZeroRange calls `punch_hole` which doesn't extend the File size if it needs to.
-// Will fix if it becomes a problem.
+// ZeroRange calls `punch_hole` which doesn't extend the File size if it needs
+// to. Will fix if it becomes a problem.
 file.write_zeroes_at(file_offset, len as usize)
 .map_err(Error::IoWriteZeroesError)?;
 Ok(())
@@ -203,7 +203,8 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
 .map_err(Error::BackingMemoryVolatileSliceFetchFailed)?;

 // SAFETY:
-// Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to.
+// Safe because we're passing a volatile slice (valid ptr), and the size of the memory
+// region it refers to.
 unsafe {
 read(
 self.source.as_raw_descriptor(),
@@ -291,7 +292,8 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
 .map_err(Error::BackingMemoryVolatileSliceFetchFailed)?;

 // SAFETY:
-// Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to.
+// Safe because we're passing a volatile slice (valid ptr), and the size of the memory
+// region it refers to.
 unsafe {
 write(
 self.source.as_raw_descriptor(),
@@ -168,9 +168,10 @@ where
 // Safe because self.descriptor is valid in any state except New or Finished.
 //
 // Note: this method call is critical for supplying the safety guarantee relied upon by
-// wait_for_handle_waker. Upon return, it ensures that wait_for_handle_waker is not running
-// and won't be scheduled again, which makes it safe to drop self.inner_for_callback
-// (wait_for_handle_waker has a non owning pointer to self.inner_for_callback).
+// wait_for_handle_waker. Upon return, it ensures that wait_for_handle_waker is not
+// running and won't be scheduled again, which makes it safe to drop
+// self.inner_for_callback (wait_for_handle_waker has a non owning pointer
+// to self.inner_for_callback).
 unsafe { unregister_wait(wait_object) }
 }
 }
@@ -96,7 +96,6 @@ pub fn push_descriptors_internal(keep_rds: &mut Vec<RawDescriptor>) {
 /// Categories that are enabled will have their events traced at runtime via
 /// `trace_event_begin!()`, `trace_event_end!()`, or `trace_event!()` scoped tracing.
 /// The categories that are marked as false will have their events skipped.
-///
 macro_rules! setup_trace_marker {
 ($(($cat:ident, $enabled:literal)),+) => {
 #[allow(non_camel_case_types, missing_docs)]
@@ -159,7 +158,6 @@ macro_rules! setup_trace_marker {
 /// - `$uid: Exit: exec`
 ///
 /// where `$uid` will be the same unique value across those two events.
-///
 macro_rules! trace_event {
 ($category:ident, $name:literal, $($arg:expr),+) => {{
 if($crate::ENABLED_CATEGORIES[$crate::TracedCategories::$category as usize].load(std::sync::atomic::Ordering::Relaxed)) {
@@ -427,10 +427,10 @@ use bitmasks::*;
 ///
 /// This function packs bits in NTSTATUS results (generally what a Windows exit code should be).
 /// There are three primary cases it deals with:
-/// 1. Vendor specific exits. These are error codes we generate explicitly in crosvm. We will
-/// pack these codes with the lower 6 "facility" bits ([21, 16]) set so they can't collide
-/// with the other cases (this makes our facility value > FACILITY_MAXIMUM_VALUE). The top
-/// 6 bits of the facility field ([27, 22]) will be clear at this point.
+/// 1. Vendor specific exits. These are error codes we generate explicitly in crosvm. We will pack
+/// these codes with the lower 6 "facility" bits ([21, 16]) set so they can't collide with the
+/// other cases (this makes our facility value > FACILITY_MAXIMUM_VALUE). The top 6 bits of the
+/// facility field ([27, 22]) will be clear at this point.
 ///
 /// 2. Non vendor NTSTATUS exits. These are error codes which come from Windows. We flip the
 /// vendor bit on these because we're going to pack the facility field, and leaving it unset
@@ -440,9 +440,8 @@ use bitmasks::*;
 /// however, if for some reason we see a non vendor code with any of those bits set, we will
 /// fall through to case #3.
 ///
-/// 3. Non NTSTATUS errors. We detect these with two heuristics:
-/// a) Reserved field is set.
-/// b) The facility field has exceeded the bottom six bits ([21, 16]).
+/// 3. Non NTSTATUS errors. We detect these with two heuristics: a) Reserved field is set. b) The
+/// facility field has exceeded the bottom six bits ([21, 16]).
 ///
 /// For such cases, we pack as much of the error as we can into the lower 6 bits of the
 /// facility field, and code field (2 bytes). In this case, the most significant bit of the
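A rough sketch of the vendor-exit packing described in case 1 above, using illustrative constants; the real masks live in the `bitmasks` module and may differ in detail:

    // Bits [21:16] of an NTSTATUS value form the low part of the facility field.
    const FACILITY_SHIFT: u32 = 16;
    const LOWER_FACILITY_BITS: u32 = 0x3f_u32 << FACILITY_SHIFT;

    // Hypothetical helper: mark a crosvm-generated exit code so it cannot collide
    // with ordinary NTSTATUS facilities.
    fn pack_vendor_exit_code(code: u16) -> u32 {
        LOWER_FACILITY_BITS | u32::from(code)
    }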
@@ -368,7 +368,8 @@ pub extern "C" fn crosvm_client_max_usb_devices() -> usize {
 USB_CONTROL_MAX_PORTS
 }

-/// Returns all USB devices passed through the crosvm instance whose control socket is listening on `socket_path`.
+/// Returns all USB devices passed through the crosvm instance whose control socket is listening on
+/// `socket_path`.
 ///
 /// The function returns the amount of entries written.
 /// # Arguments
@@ -863,7 +864,8 @@ pub struct BalloonWSRConfigFfi {
 report_threshold: u64,
 }

-/// Returns balloon working set of the crosvm instance whose control socket is listening on socket_path.
+/// Returns balloon working set of the crosvm instance whose control socket is listening on
+/// socket_path.
 ///
 /// The function returns true on success or false if an error occurred.
 ///
@@ -312,10 +312,10 @@ impl GoldfishBattery {
 /// Create GoldfishBattery device model
 ///
 /// * `mmio_base` - The 32-bit mmio base address.
-/// * `irq_num` - The corresponding interrupt number of the irq_evt
-/// which will be put into the ACPI DSDT.
-/// * `irq_evt` - The interrupt event used to notify driver about
-/// the battery properties changing.
+/// * `irq_num` - The corresponding interrupt number of the irq_evt which will be put into the
+/// ACPI DSDT.
+/// * `irq_evt` - The interrupt event used to notify driver about the battery properties
+/// changing.
 /// * `socket` - Battery control socket
 pub fn new(
 mmio_base: u64,
@@ -670,7 +670,8 @@ mod tests {

 #[test]
 #[should_panic]
-// Attempt to add a file to an fw_cfg device w/ no fileslots and assert that nothing gets inserted
+// Attempt to add a file to an fw_cfg device w/ no fileslots and assert that nothing gets
+// inserted
 fn write_file_one_slot_expect_nop() {
 let mut fw_cfg = FwCfgDevice::new(0, default_params()).unwrap();
 let data = vec![MAGIC_BYTE];
@@ -681,7 +682,8 @@ mod tests {

 #[test]
 #[should_panic]
-// Attempt to add two files to an fw_cfg w/ only one fileslot and assert only first insert succeeds.
+// Attempt to add two files to an fw_cfg w/ only one fileslot and assert only first insert
+// succeeds.
 fn write_two_files_no_slots_expect_nop_on_second() {
 let mut fw_cfg = FwCfgDevice::new(1, default_params()).unwrap();
 let data = vec![MAGIC_BYTE];
@@ -706,8 +708,8 @@ mod tests {
 fn read_fw_cfg_signature() {
 let mut data: Vec<u8> = vec![0];
 let (mut device, bai) = setup_read(&FILENAMES, &get_contents(), FW_CFG_SIGNATURE_SELECTOR);
-// To logically compare the revison vector to FW_CFG_REVISION byte-by-byte, we must use to_be_bytes()
-// since we are comparing byte arrays, not integers.
+// To logically compare the revison vector to FW_CFG_REVISION byte-by-byte, we must use
+// to_be_bytes() since we are comparing byte arrays, not integers.
 let signature = read_u32(&mut device, bai, &mut data[..]).to_be_bytes();
 assert_eq!(signature, FW_CFG_SIGNATURE);
 }
@@ -717,8 +719,8 @@ mod tests {
 fn read_fw_cfg_revision() {
 let mut data: Vec<u8> = vec![0];
 let (mut device, bai) = setup_read(&FILENAMES, &get_contents(), FW_CFG_REVISION_SELECTOR);
-// To logically compare the revison vector to FW_CFG_REVISION byte-by-byte, we must use to_be_bytes()
-// since we are comparing byte arrays, not integers.
+// To logically compare the revison vector to FW_CFG_REVISION byte-by-byte, we must use
+// to_be_bytes() since we are comparing byte arrays, not integers.
 let revision = read_u32(&mut device, bai, &mut data[..]).to_be_bytes();
 assert_eq!(revision, FW_CFG_REVISION);
 }
@@ -52,8 +52,9 @@ pub struct IrqLevelEvent {
 /// An event used by the device backend to signal hypervisor/VM about data or new unit
 /// of work being available.
 trigger_evt: Event,
-/// An event used by the hypervisor to signal device backend that it completed processing a unit
-/// of work and that device should re-raise `trigger_evt` if additional work needs to be done.
+/// An event used by the hypervisor to signal device backend that it completed processing a
+/// unit of work and that device should re-raise `trigger_evt` if additional work needs to
+/// be done.
 resample_evt: Event,
 }

@@ -82,8 +82,9 @@ pub struct Apic {
 /// Base duration for the APIC timer. A timer set with initial count = 1 and timer frequency
 /// divide = 1 runs for this long.
 cycle_length: Duration,
-// Register state bytes. Each register is 16-byte aligned, but only its first 4 bytes are used.
-// The register MMIO space is 4 KiB, but only the first 1 KiB (64 registers * 16 bytes) is used.
+// Register state bytes. Each register is 16-byte aligned, but only its first 4 bytes are
+// used. The register MMIO space is 4 KiB, but only the first 1 KiB (64 registers * 16
+// bytes) is used.
 regs: [u8; APIC_MEM_LENGTH_BYTES as usize],
 // Multiprocessing initialization state: running, waiting for SIPI, etc.
 mp_state: MPState,
@@ -96,14 +97,15 @@ pub struct Apic {
 // When the timer started or last ticked. For one-shot timers, this is the Instant when the
 // timer started. For periodic timers, it's the Instant when it started or last expired.
 last_tick: Instant,
-// Pending startup interrupt vector. There can only be one pending startup interrupt at a time.
+// Pending startup interrupt vector. There can only be one pending startup interrupt at a
+// time.
 sipi: Option<Vector>,
 // True if there's a pending INIT interrupt to send to the CPU.
 init: bool,
 // The number of pending non-maskable interrupts to be injected into the CPU. The architecture
-// specifies that multiple NMIs can be sent concurrently and will be processed in order. Unlike
-// fixed interrupts there's no architecturally defined place where the NMIs are queued or
-// stored, we need to store them separately.
+// specifies that multiple NMIs can be sent concurrently and will be processed in order.
+// Unlike fixed interrupts there's no architecturally defined place where the NMIs are
+// queued or stored, we need to store them separately.
 nmis: u32,
 }

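A small sketch of the register layout described in the comment above: 16-byte-aligned slots where only the first 4 bytes carry data. The helper is illustrative, not the device's actual accessor:

    fn read_reg(regs: &[u8], reg_index: usize) -> u32 {
        // Each register occupies a 16-byte slot; only its first 4 bytes are used.
        let offset = reg_index * 16;
        u32::from_le_bytes(regs[offset..offset + 4].try_into().unwrap())
    }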
@@ -728,11 +730,11 @@ pub struct InterruptDestination {
 /// The APIC ID that sent this interrupt.
 pub source_id: u8,
 /// In physical destination mode, used to specify the APIC ID of the destination processor.
-/// In logical destination mode, used to specify a message destination address (MDA) that can be
-/// used to select specific processors in clusters. Only used if shorthand is None.
+/// In logical destination mode, used to specify a message destination address (MDA) that can
+/// be used to select specific processors in clusters. Only used if shorthand is None.
 pub dest_id: u8,
-/// Specifies a quick destination of all processors, all excluding self, or self. If None, then
-/// dest_id and mode are used to find the destinations.
+/// Specifies a quick destination of all processors, all excluding self, or self. If None,
+/// then dest_id and mode are used to find the destinations.
 pub shorthand: DestinationShorthand,
 /// Specifies if physical or logical addressing is used for matching dest_id.
 pub mode: DestinationMode,
@@ -747,7 +749,8 @@ pub struct InterruptData {
 pub delivery: DeliveryMode,
 /// Edge- or level-triggered.
 pub trigger: TriggerMode,
-/// For level-triggered interrupts, specifies whether the line should be asserted or deasserted.
+/// For level-triggered interrupts, specifies whether the line should be asserted or
+/// deasserted.
 pub level: Level,
 }

@@ -883,14 +886,14 @@ impl Reg {
 #[repr(usize)]
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 enum VectorReg {
-/// In-service register. A bit is set for each interrupt vector currently being serviced by the
-/// processor.
+/// In-service register. A bit is set for each interrupt vector currently being serviced by
+/// the processor.
 Isr = Reg::ISR,
 /// Trigger mode register. Records whether interrupts are edge-triggered (bit is clear) or
 /// level-triggered (bit is set).
 Tmr = Reg::TMR,
-/// Interrupt request register. A bit is set for each interrupt vector received by the APIC but
-/// not yet serviced by the processor.
+/// Interrupt request register. A bit is set for each interrupt vector received by the APIC
+/// but not yet serviced by the processor.
 Irr = Reg::IRR,
 }

@@ -1587,7 +1590,8 @@ mod tests {
 irqs,
 PendingInterrupts {
 fixed: None,
-needs_window: false, // Not injectable due to higher priority ISRV, so no window needed.
+// Not injectable due to higher priority ISRV, so no window needed.
+needs_window: false,
 ..Default::default()
 }
 );
@@ -214,8 +214,8 @@ impl IrqChip for GeniezoneKernelIrqChip {

 /// Return a vector of all registered irq numbers and their associated events and event
 /// indices. These should be used by the main thread to wait for irq events.
-/// For the GeniezoneKernelIrqChip, the kernel handles listening to irq events being triggered by
-/// devices, so this function always returns an empty Vec.
+/// For the GeniezoneKernelIrqChip, the kernel handles listening to irq events being triggered
+/// by devices, so this function always returns an empty Vec.
 fn irq_event_tokens(&self) -> Result<Vec<(IrqEventIndex, IrqEventSource, Event)>> {
 Ok(Vec::new())
 }
@@ -96,10 +96,10 @@ struct OutEventSnapshot {
 }

 /// Snapshot of [Ioapic] state. Some fields were intentionally excluded:
-/// * [Ioapic::resample_events]: these will get re-registered when the VM is
-/// created (e.g. prior to restoring a snapshot).
-/// * [Ioapic::out_events]: this isn't serializable as it contains Events.
-/// Replaced by [IoapicSnapshot::out_event_snapshots].
+/// * [Ioapic::resample_events]: these will get re-registered when the VM is created (e.g. prior to
+/// restoring a snapshot).
+/// * [Ioapic::out_events]: this isn't serializable as it contains Events. Replaced by
+/// [IoapicSnapshot::out_event_snapshots].
 /// * [Ioapic::irq_tube]: will be set up as part of creating the VM.
 ///
 /// See [Ioapic] for descriptions of fields by the same names.
@@ -131,8 +131,8 @@ pub struct Ioapic {
 ioregsel: u8,
 /// ioapicid register. Bits 24 - 27 contain the APIC ID for this device.
 ioapicid: u32,
-/// Remote IRR for Edge Triggered Real Time Clock interrupts, which allows the CMOS to know when
-/// one of its interrupts is being coalesced.
+/// Remote IRR for Edge Triggered Real Time Clock interrupts, which allows the CMOS to know
+/// when one of its interrupts is being coalesced.
 rtc_remote_irr: bool,
 /// Outgoing irq events that are used to inject MSI interrupts.
 /// Also contains the serializable form used for snapshotting.
@@ -170,8 +170,8 @@ impl AiaDescriptor {
 addr: raw_aplic_addr as u64,
 flags: 0,
 };
-// Safe because we allocated the struct that's being passed in, and raw_aplic_addr is pointing
-// to a uniquely owned local, mutable variable.
+// Safe because we allocated the struct that's being passed in, and raw_aplic_addr is
+// pointing to a uniquely owned local, mutable variable.
 let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR(), &kvm_attr) };
 if ret != 0 {
 return errno_result();
@@ -611,8 +611,8 @@ impl IrqChip for KvmSplitIrqChip {
 vcpu.interrupt(vector)?;
 }

-// The second interrupt request should be handled immediately, so ask vCPU to exit as soon as
-// possible.
+// The second interrupt request should be handled immediately, so ask vCPU to exit as soon
+// as possible.
 if self.interrupt_requested(vcpu_id) {
 vcpu.set_interrupt_window_requested(true);
 }
@ -161,7 +161,8 @@ pub trait IrqChip: Send {
/// Add a vcpu to the irq chip.
fn add_vcpu(&mut self, vcpu_id: usize, vcpu: &dyn Vcpu) -> Result<()>;

/// Register an event with edge-trigger semantic that can trigger an interrupt for a particular GSI.
/// Register an event with edge-trigger semantic that can trigger an interrupt for a particular
/// GSI.
fn register_edge_irq_event(
&mut self,
irq: u32,

@ -172,7 +173,8 @@ pub trait IrqChip: Send {
/// Unregister an event with edge-trigger semantic for a particular GSI.
fn unregister_edge_irq_event(&mut self, irq: u32, irq_event: &IrqEdgeEvent) -> Result<()>;

/// Register an event with level-trigger semantic that can trigger an interrupt for a particular GSI.
/// Register an event with level-trigger semantic that can trigger an interrupt for a particular
/// GSI.
fn register_level_irq_event(
&mut self,
irq: u32,

@ -389,8 +389,8 @@ impl IrqChip for WhpxSplitIrqChip {
vcpu.interrupt(vector as u32)?;
}

// The second interrupt request should be handled immediately, so ask vCPU to exit as soon as
// possible.
// The second interrupt request should be handled immediately, so ask vCPU to exit as soon
// as possible.
if self.interrupt_requested(vcpu_id) {
vcpu.set_interrupt_window_requested(true);
}

@ -395,8 +395,8 @@ pub trait PciDevice: Send + Suspendable {
Ok(())
}

/// Gets a reference to the API client for sending VmMemoryRequest. Any devices that uses ioevents
/// must provide this.
/// Gets a reference to the API client for sending VmMemoryRequest. Any devices that uses
/// ioevents must provide this.
fn get_vm_memory_client(&self) -> Option<&VmMemoryClient> {
None
}

@ -282,7 +282,8 @@ impl VfioPlatformDevice {
let host = mmap.as_ptr() as u64;
// SAFETY:
// Safe because the given guest_map_start is valid guest bar address. and
// the host pointer is correct and valid guaranteed by MemoryMapping interface.
// the host pointer is correct and valid guaranteed by MemoryMapping
// interface.
match unsafe {
self.device
.vfio_dma_map(guest_map_start, mmap_size, host, true)
@ -218,9 +218,10 @@ impl Serial {
for event in events.iter() {
match event.token {
Token::Kill => {
// Ignore the kill event until there are no other events to process so that
// we drain `rx` as much as possible. The next `wait_ctx.wait()` call will
// immediately re-entry this case since we don't call `kill_evt.wait()`.
// Ignore the kill event until there are no other events to process
// so that we drain `rx` as much as possible. The next
// `wait_ctx.wait()` call will immediately re-entry this case since
// we don't call `kill_evt.wait()`.
if events.iter().all(|e| matches!(e.token, Token::Kill)) {
return rx;
}

@ -262,7 +263,8 @@ impl Serial {
}
}
Err(e) => {
// Being interrupted is not an error, but everything else is.
// Being interrupted is not an error, but everything else
// is.
if e.kind() != io::ErrorKind::Interrupted {
error!(
"failed to read for bytes to queue into serial device: {}",

@ -118,8 +118,8 @@ fn tsc_sync_mitigations_inner(
let host_tsc_now = rdtsc();

for i in 0..num_vcpus {
// This handles the case where num_vcpus > num_cores, even though we try to avoid that in
// practice.
// This handles the case where num_vcpus > num_cores, even though we try to avoid that
// in practice.
let pinned_core = i % num_cores;

mitigations.affinities[i] = Some(vec![pinned_core]);

@ -660,8 +660,8 @@ impl BackendDeviceType {
) -> Result<()> {
let transfer_status = {
// We need to hold the lock to avoid race condition.
// While we are trying to submit the transfer, another thread might want to cancel the same
// transfer. Holding the lock here makes sure one of them is cancelled.
// While we are trying to submit the transfer, another thread might want to cancel the
// same transfer. Holding the lock here makes sure one of them is cancelled.
let mut state = xhci_transfer.state().lock();
match mem::replace(&mut *state, XhciTransferState::Cancelled) {
XhciTransferState::Created => {

@ -172,8 +172,8 @@ impl EventLoop {
.map_err(Error::WaitContextAddDescriptor)
}

/// Removes event for this `descriptor`. This function is safe to call even when the `descriptor`
/// is not actively being polled because it's been paused.
/// Removes event for this `descriptor`. This function is safe to call even when the
/// `descriptor` is not actively being polled because it's been paused.
///
/// EventLoop does not guarantee all events for `descriptor` is handled.
pub fn remove_event_for_descriptor(&self, descriptor: &dyn AsRawDescriptor) -> Result<()> {

@ -757,8 +757,8 @@ impl VfioGroup {

let container_raw_descriptor = container.as_raw_descriptor();
// SAFETY:
// Safe as we are the owner of group_file and container_raw_descriptor which are valid value,
// and we verify the ret value
// Safe as we are the owner of group_file and container_raw_descriptor which are valid
// value, and we verify the ret value
ret = unsafe {
ioctl_with_ref(
&group_file,

@ -1355,10 +1355,10 @@ impl VfioDevice {
}

/// Enable vfio device's irq and associate Irqfd Event with device.
/// When MSIx is enabled, multi vectors will be supported, and vectors starting from subindex to subindex +
/// descriptors length will be assigned with irqfd in the descriptors array.
/// when index = VFIO_PCI_REQ_IRQ_INDEX, kernel vfio will trigger this event when physical device
/// is removed.
/// When MSIx is enabled, multi vectors will be supported, and vectors starting from subindex to
/// subindex + descriptors length will be assigned with irqfd in the descriptors array.
/// when index = VFIO_PCI_REQ_IRQ_INDEX, kernel vfio will trigger this event when physical
/// device is removed.
/// If descriptor is None, -1 is assigned to the irq. A value of -1 is used to either de-assign
/// interrupts if already assigned or skip un-assigned interrupts.
pub fn irq_enable(
@ -108,8 +108,8 @@ pub struct DiskOption {
)]
pub io_concurrency: NonZeroU32,
#[serde(default)]
/// Experimental option to run multiple worker threads in parallel. If false, only single thread
/// runs by default. Note this option is not effective for vhost-user blk device.
/// Experimental option to run multiple worker threads in parallel. If false, only single
/// thread runs by default. Note this option is not effective for vhost-user blk device.
pub multiple_workers: bool,
#[serde(default, alias = "async_executor")]
/// The async executor kind to simulate the block device with. This option takes

@ -436,9 +436,9 @@ impl VirtioDevice for Console {
match queues_state {
None => Ok(()),
Some((mem, interrupt, queues)) => {
// TODO(khei): activate is just what we want at the moment, but we should probably move
// it into a "start workers" function to make it obvious that it isn't strictly
// used for activate events.
// TODO(khei): activate is just what we want at the moment, but we should probably
// move it into a "start workers" function to make it obvious that
// it isn't strictly used for activate events.
self.activate(mem, interrupt, queues)?;
Ok(())
}

@ -22,15 +22,15 @@ pub enum CachePolicy {
/// the FUSE client (i.e., the file system does not have exclusive access to the directory).
Never,

/// The client is free to choose when and how to cache file data. This is the default policy and
/// uses close-to-open consistency as described in the enum documentation.
/// The client is free to choose when and how to cache file data. This is the default policy
/// and uses close-to-open consistency as described in the enum documentation.
#[default]
Auto,

/// The client should always cache file data. This means that the FUSE client will not
/// invalidate any cached data that was returned by the file system the last time the file was
/// opened. This policy should only be selected when the file system has exclusive access to the
/// directory.
/// opened. This policy should only be selected when the file system has exclusive access to
/// the directory.
Always,
}

@ -108,17 +108,17 @@ pub struct Config {
#[serde(default, alias = "cache")]
pub cache_policy: CachePolicy,

/// Whether the file system should enabled writeback caching. This can improve performance as it
/// allows the FUSE client to cache and coalesce multiple writes before sending them to the file
/// system. However, enabling this option can increase the risk of data corruption if the file
/// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT**
/// have exclusive access). Additionally, the file system should have read access to all files
/// in the directory it is serving as the FUSE client may send read requests even for files
/// opened with `O_WRONLY`.
/// Whether the file system should enabled writeback caching. This can improve performance as
/// it allows the FUSE client to cache and coalesce multiple writes before sending them to
/// the file system. However, enabling this option can increase the risk of data corruption
/// if the file contents can change without the knowledge of the FUSE client (i.e., the
/// server does **NOT** have exclusive access). Additionally, the file system should have
/// read access to all files in the directory it is serving as the FUSE client may send
/// read requests even for files opened with `O_WRONLY`.
///
/// Therefore callers should only enable this option when they can guarantee that: 1) the file
/// system has exclusive access to the directory and 2) the file system has read permissions for
/// all files in that directory.
/// system has exclusive access to the directory and 2) the file system has read permissions
/// for all files in that directory.
///
/// The default value for this option is `false`.
#[serde(default)]

@ -127,8 +127,8 @@ pub struct Config {
/// Controls whether security.* xattrs (except for security.selinux) are re-written. When this
/// is set to true, the server will add a "user.virtiofs" prefix to xattrs in the security
/// namespace. Setting these xattrs requires CAP_SYS_ADMIN in the namespace where the file
/// system was mounted and since the server usually runs in an unprivileged user namespace, it's
/// unlikely to have that capability.
/// system was mounted and since the server usually runs in an unprivileged user namespace,
/// it's unlikely to have that capability.
///
/// The default value for this option is `false`.
#[serde(default, alias = "rewrite-security-xattrs")]

@ -140,21 +140,21 @@ pub struct Config {
#[serde(default)]
pub ascii_casefold: bool,

// UIDs which are privileged to perform quota-related operations. We cannot perform a CAP_FOWNER
// check so we consult this list when the VM tries to set the project quota and the process uid
// doesn't match the owner uid. In that case, all uids in this list are treated as if they have
// CAP_FOWNER.
// UIDs which are privileged to perform quota-related operations. We cannot perform a
// CAP_FOWNER check so we consult this list when the VM tries to set the project quota and
// the process uid doesn't match the owner uid. In that case, all uids in this list are
// treated as if they have CAP_FOWNER.
#[cfg(feature = "arc_quota")]
#[serde(default, deserialize_with = "deserialize_privileged_quota_uids")]
pub privileged_quota_uids: Vec<libc::uid_t>,

/// Use DAX for shared files.
///
/// Enabling DAX can improve performance for frequently accessed files by mapping regions of the
/// file directly into the VM's memory region, allowing direct access with the cost of slightly
/// increased latency the first time the file is accessed. Additionally, since the mapping is
/// shared directly from the host kernel's file cache, enabling DAX can improve performance even
/// when the cache policy is `Never`.
/// Enabling DAX can improve performance for frequently accessed files by mapping regions of
/// the file directly into the VM's memory region, allowing direct access with the cost of
/// slightly increased latency the first time the file is accessed. Additionally, since the
/// mapping is shared directly from the host kernel's file cache, enabling DAX can improve
/// performance even when the cache policy is `Never`.
///
/// The default value for this option is `false`.
#[serde(default, alias = "dax")]
@ -1506,9 +1506,10 @@ impl PassthroughFs {
let data = self.find_handle(handle, inode)?;

{
// We can't enable verity while holding a writable fd. We don't know whether the file
// was opened for writing so check it here. We don't expect this to be a frequent
// operation so the extra latency should be fine.
// We can't enable verity while holding a writable fd. We don't know whether the
// file was opened for writing so check it here. We don't expect
// this to be a frequent operation so the extra latency should be
// fine.
let mut file = data.file.lock();
let flags = FileFlags::from_file(&*file).map_err(io::Error::from)?;
match flags {

@ -3712,7 +3713,8 @@ mod tests {
);
}

// atomic_open with flag O_RDWR | O_CREATE | O_EXCL, should return positive dentry and file handler
// atomic_open with flag O_RDWR | O_CREATE | O_EXCL, should return positive dentry and file
// handler
let res = atomic_open(
&fs,
&temp_dir.path().join("dir/c.txt"),

@ -1182,9 +1182,9 @@ pub struct Gpu {
udmabuf: bool,
rutabaga_server_descriptor: Option<SafeDescriptor>,
#[cfg(windows)]
/// Because the Windows GpuDisplay can't expose an epollfd, it has to inform the GPU worker which
/// descriptors to add to its wait context. That's what this Tube is used for (it is provided
/// to each display backend.
/// Because the Windows GpuDisplay can't expose an epollfd, it has to inform the GPU worker
/// which descriptors to add to its wait context. That's what this Tube is used for (it is
/// provided to each display backend.
gpu_display_wait_descriptor_ctrl_wr: SendTube,
#[cfg(windows)]
/// The GPU worker uses this Tube to receive the descriptors that should be added to its wait

@ -548,7 +548,8 @@ impl VirtioGpu {
&self.display
}

/// Gets the list of supported display resolutions as a slice of `(width, height, enabled)` tuples.
/// Gets the list of supported display resolutions as a slice of `(width, height, enabled)`
/// tuples.
pub fn display_info(&self) -> Vec<(u32, u32, bool)> {
(0..VIRTIO_GPU_MAX_SCANOUTS)
.map(|scanout_id| scanout_id as u32)

@ -64,7 +64,8 @@ pub fn new_keyboard_config(idx: u32) -> VirtioInputConfig {
)
}

/// Instantiates a VirtioInputConfig object with the default configuration for a collection of switches.
/// Instantiates a VirtioInputConfig object with the default configuration for a collection of
/// switches.
pub fn new_switches_config(idx: u32) -> VirtioInputConfig {
VirtioInputConfig::new(
virtio_input_device_ids::new(0, 0, 0, 0),

@ -76,7 +77,8 @@ pub fn new_switches_config(idx: u32) -> VirtioInputConfig {
)
}

/// Instantiates a VirtioInputConfig object with the default configuration for a collection of rotary.
/// Instantiates a VirtioInputConfig object with the default configuration for a collection of
/// rotary.
pub fn new_rotary_config(idx: u32) -> VirtioInputConfig {
VirtioInputConfig::new(
virtio_input_device_ids::new(0, 0, 0, 0),

@ -238,8 +238,8 @@ pub fn abs_info<T: AsRawDescriptor>(descriptor: &T) -> BTreeMap<u16, virtio_inpu
}

/// Grabs an event device (see EVIOCGGRAB ioctl for details). After this function succeeds the given
/// descriptor has exclusive access to the device, effectively making it unusable for any other process in
/// the host.
/// descriptor has exclusive access to the device, effectively making it unusable for any other
/// process in the host.
pub fn grab_evdev<T: AsRawDescriptor>(descriptor: &mut T) -> Result<()> {
let val: u32 = 1;
let ret = {
@ -214,9 +214,9 @@ impl virtio_input_bitmap {
if byte_pos < ret.len() {
ret.bitmap[byte_pos] |= bit_byte;
} else {
// This would only happen if new event codes (or types, or ABS_*, etc) are defined to be
// larger than or equal to 1024, in which case a new version of the virtio input
// protocol needs to be defined.
// This would only happen if new event codes (or types, or ABS_*, etc) are defined
// to be larger than or equal to 1024, in which case a new version
// of the virtio input protocol needs to be defined.
// There is nothing we can do about this error except log it.
error!("Attempted to set an out of bounds bit: {}", idx);
}

@ -411,7 +411,8 @@ impl State {

let vfio_map_result = match dmabuf_map {
// SAFETY:
// Safe because [dmabuf_map, dmabuf_map + size) refers to an external mmap'ed region.
// Safe because [dmabuf_map, dmabuf_map + size) refers to an external mmap'ed
// region.
Some(dmabuf_map) => unsafe {
mapper
.1

@ -169,9 +169,8 @@ pub struct CreateIpcMapperRet {
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. This
/// should be cloned and shared between different ipc mappers
/// with different `endpoint_id`s.
/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. This should be cloned and
/// shared between different ipc mappers with different `endpoint_id`s.
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
CreateIpcMapperRet {

@ -155,26 +155,24 @@ pub enum AddMapResult {
/// the virtio request that triggered the fault.
///
/// As such, the flow of a fault is:
/// 1) The guest sends an virtio-iommu message that triggers a fault. Faults can
/// be triggered by unmap or detach messages, or by attach messages if such
/// messages are re-attaching an endpoint to a new domain. One example of
/// a guest event that can trigger such a message is a userspace VVU device
/// process crashing and triggering the guest kernel to re-attach the VVU
/// 1) The guest sends an virtio-iommu message that triggers a fault. Faults can be triggered by
/// unmap or detach messages, or by attach messages if such messages are re-attaching an
/// endpoint to a new domain. One example of a guest event that can trigger such a message is a
/// userspace VVU device process crashing and triggering the guest kernel to re-attach the VVU
/// device to the null endpoint.
/// 2) The viommu device removes an exported mapping from the mapper.
/// 3) The mapper signals the IOMMU fault eventfd and returns the fault
/// resolution event to the viommu device.
/// 4) The viommu device starts waiting on the fault resolution event. Note that
/// although the viommu device and mapper are both running on the same
/// executor, this wait is async. This means that although further processing
/// of virtio-iommu requests is paused, the mapper continues to run.
/// 3) The mapper signals the IOMMU fault eventfd and returns the fault resolution event to the
/// viommu device.
/// 4) The viommu device starts waiting on the fault resolution event. Note that although the
/// viommu device and mapper are both running on the same executor, this wait is async. This
/// means that although further processing of virtio-iommu requests is paused, the mapper
/// continues to run.
/// 5) The client receives the IOMMU fault.
/// 6) The client releases all exported regions.
/// 7) Once the mapper receives the final release message from the client,
/// it signals the fault resolution event that the viommu device is
/// waiting on.
/// 8) The viommu device finishes processing the original virtio iommu
/// request and sends a reply to the guest.
/// 7) Once the mapper receives the final release message from the client, it signals the fault
/// resolution event that the viommu device is waiting on.
/// 8) The viommu device finishes processing the original virtio iommu request and sends a reply to
/// the guest.
pub trait MemoryMapper: Send {
/// Creates a new mapping. If the mapping overlaps with an existing
/// mapping, return Ok(false).

@ -118,10 +118,10 @@ impl MemoryMapper for VfioWrapper {
fn supports_detach(&self) -> bool {
// A few reasons why we don't support detach:
//
// 1. Seems it's not possible to dynamically attach and detach a IOMMU domain if the
// virtio IOMMU device is running on top of VFIO
// 2. Even if VIRTIO_IOMMU_T_DETACH is implemented in front-end driver, it could violate
// the following virtio IOMMU spec: Detach an endpoint from a domain. when this request
// 1. Seems it's not possible to dynamically attach and detach a IOMMU domain if the virtio
// IOMMU device is running on top of VFIO
// 2. Even if VIRTIO_IOMMU_T_DETACH is implemented in front-end driver, it could violate the
// following virtio IOMMU spec: Detach an endpoint from a domain. when this request
// completes, the endpoint cannot access any mapping from that domain anymore.
//
// This is because VFIO doesn't support detaching a single device. When the virtio-iommu
@ -452,8 +452,8 @@ impl PvClockWorker {
(
Self::get_suspended_duration(&suspend_time),
// SAFETY:
// Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify any
// other memory.
// Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify
// any other memory.
unsafe { _rdtsc() } - suspend_time.tsc_value,
)
} else {

@ -49,8 +49,8 @@ pub struct QueueConfig {
max_size: u16,

/// The queue size in elements the driver selected. This is always guaranteed to be a power of
/// two less than or equal to `max_size`, as required for split virtqueues. These invariants are
/// enforced by `set_size()`.
/// two less than or equal to `max_size`, as required for split virtqueues. These invariants
/// are enforced by `set_size()`.
size: u16,

/// Indicates if the queue is finished with configuration

@ -59,7 +59,8 @@ pub struct QueueConfig {
/// MSI-X vector for the queue. Don't care for INTx
vector: u16,

/// Ring features (e.g. `VIRTIO_RING_F_EVENT_IDX`, `VIRTIO_F_RING_PACKED`) offered by the device
/// Ring features (e.g. `VIRTIO_RING_F_EVENT_IDX`, `VIRTIO_F_RING_PACKED`) offered by the
/// device
features: u64,

// Device feature bits accepted by the driver

@ -267,7 +268,8 @@ impl QueueConfig {
if self.activated {
bail!("queue is already activated");
}
// If VIRTIO_F_RING_PACKED feature bit is set, create a packed queue, otherwise create a split queue
// If VIRTIO_F_RING_PACKED feature bit is set, create a packed queue, otherwise create a
// split queue
let queue: Queue = if ((self.acked_features >> VIRTIO_F_RING_PACKED) & 1) != 0 {
let pq =
PackedQueue::new(self, mem, event).context("Failed to create a packed queue.")?;

@ -335,13 +337,15 @@ impl QueueConfig {
}
}

/// Usage: define_queue_method!(method_name, return_type[, mut][, arg1: arg1_type, arg2: arg2_type, ...])
/// Usage: define_queue_method!(method_name, return_type[, mut][, arg1: arg1_type, arg2: arg2_type,
/// ...])
///
/// - `method_name`: The name of the method to be defined (as an identifier).
/// - `return_type`: The return type of the method.
/// - `mut` (optional): Include this keyword if the method requires a mutable reference to `self` (`&mut self`).
/// - `arg1: arg1_type, arg2: arg2_type, ...` (optional): Include method parameters as a comma-separated list
/// of `name: type` pairs, if the method takes any arguments.
/// - `mut` (optional): Include this keyword if the method requires a mutable reference to `self`
/// (`&mut self`).
/// - `arg1: arg1_type, arg2: arg2_type, ...` (optional): Include method parameters as a
/// comma-separated list of `name: type` pairs, if the method takes any arguments.
macro_rules! define_queue_method {
(
$(#[$doc:meta])*

@ -212,8 +212,8 @@ impl PackedQueue {

/// Set the device event suppression
///
// This field is used to specify the timing of when the driver notifies the
// device that the descriptor table is ready to be processed.
/// This field is used to specify the timing of when the driver notifies the
/// device that the descriptor table is ready to be processed.
fn set_avail_event(&mut self, event: PackedDescEvent) {
fence(Ordering::SeqCst);
self.mem

@ -345,11 +345,11 @@ impl SplitQueue {
/// (similar to how DC circuits are analyzed).
///
/// The two distances are as follows:
/// * `A` is the distance between the driver's requested notification
/// point, and the current position in the ring.
/// * `A` is the distance between the driver's requested notification point, and the current
/// position in the ring.
///
/// * `B` is the distance between the last time we notified the guest,
/// and the current position in the ring.
/// * `B` is the distance between the last time we notified the guest, and the current position
/// in the ring.
///
/// If we graph these distances for the situation where we want to notify
/// the guest, and when we don't want to notify the guest, we see that

@ -389,15 +389,13 @@ impl SplitQueue {
/// anymore. (Notifications will never be sent.) But why is that? The algebra
/// here *appears* to work out, but all semantic meaning is lost. There are
/// two explanations for why this happens:
/// * The intuitive one: the terms in the inequality are not actually
/// separable; in other words, (next_used - last_used) is an inseparable
/// term, so subtracting next_used from both sides of the original
/// inequality and zeroing them out is semantically invalid. But why aren't
/// * The intuitive one: the terms in the inequality are not actually separable; in other words,
/// (next_used - last_used) is an inseparable term, so subtracting next_used from both sides
/// of the original inequality and zeroing them out is semantically invalid. But why aren't
/// they separable? See below.
/// * The theoretical one: canceling like terms relies a vector space law:
/// a + x = b + x => a = b (cancellation law). For congruences / equality
/// under modulo, this law is satisfied, but for inequalities under mod, it
/// is not; therefore, we cannot cancel like terms.
/// * The theoretical one: canceling like terms relies a vector space law: a + x = b + x => a =
/// b (cancellation law). For congruences / equality under modulo, this law is satisfied, but
/// for inequalities under mod, it is not; therefore, we cannot cancel like terms.
///
/// ```text
/// ┌──────────────────────────────────┐
@ -374,7 +374,8 @@ impl Inquiry {
0xb2 => {
// Page length
outbuf[3] = 4;
// skip outbuf[4]: crosvm does not support logical block provisioning threshold sets.
// skip outbuf[4]: crosvm does not support logical block provisioning threshold
// sets.
const UNMAP: u8 = 1 << 7;
const WRITE_SAME_16: u8 = 1 << 6;
const WRITE_SAME_10: u8 = 1 << 5;

@ -727,11 +727,12 @@ async fn notify_reset_signal(reset_signal: &(AsyncRwLock<bool>, Condvar)) {

/// Runs all workers once and exit if any worker exit.
///
/// Returns [`LoopState::Break`] if the worker `f_kill` or `f_resample` exit, or something went wrong
/// on shutdown process. The caller should not run the worker again and should exit the main loop.
/// Returns [`LoopState::Break`] if the worker `f_kill` or `f_resample` exit, or something went
/// wrong on shutdown process. The caller should not run the worker again and should exit the main
/// loop.
///
/// If this function returns [`LoopState::Continue`], the caller can continue the main loop by resetting
/// the streams and run the worker again.
/// If this function returns [`LoopState::Continue`], the caller can continue the main loop by
/// resetting the streams and run the worker again.
fn run_worker_once(
ex: &Executor,
streams: &Rc<AsyncRwLock<Vec<AsyncRwLock<StreamInfo>>>>,

@ -56,7 +56,8 @@ pub struct StreamInfoBuilder {
impl StreamInfoBuilder {
/// Creates a StreamInfoBuilder with minimal required fields:
///
/// * `stream_source_generator`: Generator which generates stream source in [`StreamInfo::prepare()`].
/// * `stream_source_generator`: Generator which generates stream source in
/// [`StreamInfo::prepare()`].
pub fn new(stream_source_generator: Arc<SysAudioStreamSourceGenerator>) -> Self {
StreamInfoBuilder {
stream_source_generator,

@ -64,8 +65,8 @@ impl StreamInfoBuilder {
}
}

/// Set the [`StreamEffect`]s to use when creating a stream from the stream source in [`StreamInfo::prepare()`].
/// The default value is no effects.
/// Set the [`StreamEffect`]s to use when creating a stream from the stream source in
/// [`StreamInfo::prepare()`]. The default value is no effects.
pub fn effects(mut self, effects: Vec<StreamEffect>) -> Self {
self.effects = effects;
self

@ -392,7 +393,8 @@ impl StreamInfo {
buffer_bytes: self.buffer_bytes,
period_bytes: self.period_bytes,
direction: self.direction, // VIRTIO_SND_D_*
state: self.state, // VIRTIO_SND_R_PCM_SET_PARAMS -> VIRTIO_SND_R_PCM_STOP, or 0 (uninitialized)
// VIRTIO_SND_R_PCM_SET_PARAMS -> VIRTIO_SND_R_PCM_STOP, or 0 (uninitialized)
state: self.state,
effects: self.effects.clone(),
just_reset: self.just_reset,
}

@ -38,7 +38,8 @@ pub enum Error {
/// Failed to parse parameters.
#[error("Invalid snd parameter: {0}")]
UnknownParameter(String),
/// Invalid PCM device config index. Happens when the length of PCM device config is less than the number of PCM devices.
/// Invalid PCM device config index. Happens when the length of PCM device config is less than
/// the number of PCM devices.
#[error("Invalid PCM device config index: {0}")]
InvalidPCMDeviceConfigIndex(usize),
/// Invalid PCM info direction (VIRTIO_SND_D_OUTPUT = 0, VIRTIO_SND_D_INPUT = 1)

@ -115,9 +115,10 @@ impl StreamInfo {
self.channels as usize,
self.format,
self.frame_rate as usize,
// `buffer_size` in `audio_streams` API indicates the buffer size in bytes that the stream
// consumes (or transmits) each time (next_playback/capture_buffer).
// `period_bytes` in virtio-snd device (or ALSA) indicates the device transmits (or
// `buffer_size` in `audio_streams` API indicates the buffer size in bytes that the
// stream consumes (or transmits) each time
// (next_playback/capture_buffer). `period_bytes` in virtio-snd
// device (or ALSA) indicates the device transmits (or
// consumes) for each PCM message.
// Therefore, `buffer_size` in `audio_streams` == `period_bytes` in virtio-snd.
self.period_bytes / frame_size,
@ -344,8 +344,8 @@ impl Stream {

impl Drop for Stream {
fn drop(&mut self) {
// Try to stop and release the stream in case it was playing, these operations will fail if the
// stream is already released, just ignore that failure
// Try to stop and release the stream in case it was playing, these operations will fail if
// the stream is already released, just ignore that failure
let _ = self.vios_client.lock().stop_stream(self.stream_id);
let _ = self.vios_client.lock().release_stream(self.stream_id);

@ -252,7 +252,8 @@ impl Worker {
} else {
(
VIRTIO_SND_S_OK,
// Safe to unwrap because we just ensured all the ids are valid
// Safe to unwrap because we just ensured all the ids are
// valid
(start_id..end_id)
.map(|id| {
self.vios_client.lock().jack_info(id).unwrap()

@ -319,7 +320,8 @@ impl Worker {
} else {
(
VIRTIO_SND_S_OK,
// Safe to unwrap because we just ensured all the ids are valid
// Safe to unwrap because we just ensured all the ids are
// valid
(start_id..end_id)
.map(|id| {
self.vios_client.lock().chmap_info(id).unwrap()

@ -348,7 +350,8 @@ impl Worker {
} else {
(
VIRTIO_SND_S_OK,
// Safe to unwrap because we just ensured all the ids are valid
// Safe to unwrap because we just ensured all the ids are
// valid
(start_id..end_id)
.map(|id| {
self.vios_client.lock().stream_info(id).unwrap()

@ -231,7 +231,8 @@ fn create_vu_multi_port_device(
let port = x
.create_serial_device::<ConsolePort>(
ProtectionType::Unprotected,
// We need to pass an event as per Serial Device API but we don't really use it anyway.
// We need to pass an event as per Serial Device API but we don't really use it
// anyway.
&Event::new()?,
keep_rds,
)

@ -35,7 +35,6 @@
//! Ok(())
//! }
//! ```
//!
// Implementation note:
// This code lets us take advantage of the vmm_vhost low level implementation of the vhost user
// protocol. DeviceRequestHandler implements the VhostUserSlaveReqHandler trait from vmm_vhost,

@ -33,7 +33,8 @@ pub fn read_from_tube_transporter(
let tube_transporter = TubeTransporterReader::create_tube_transporter_reader(
// SAFETY:
// Safe because we know that raw_transport_tube is valid (passed by inheritance), and that
// the blocking & framing modes are accurate because we create them ourselves in the broker.
// the blocking & framing modes are accurate because we create them ourselves in the
// broker.
unsafe {
PipeConnection::from_raw_descriptor(
raw_transport_tube,

@ -74,7 +74,8 @@ pub trait VhostUserDevice {
ex: &Executor,
) -> anyhow::Result<Box<dyn VhostUserSlaveReqHandler>>;

/// The preferred ExecutorKind of an Executor to accept by [`VhostUserDevice::into_req_handler()`].
/// The preferred ExecutorKind of an Executor to accept by
/// [`VhostUserDevice::into_req_handler()`].
fn executor_kind(&self) -> Option<ExecutorKind> {
None
}

@ -55,8 +55,8 @@ struct InputBuffer {
mapping: MemoryMappingArena,
/// Resource ID that we will signal using `NotifyEndOfBitstreamBuffer` upon destruction.
resource_id: u32,
/// Pointer to the event queue to send the `NotifyEndOfBitstreamBuffer` event to. The event will
/// not be sent if the pointer becomes invalid.
/// Pointer to the event queue to send the `NotifyEndOfBitstreamBuffer` event to. The event
/// will not be sent if the pointer becomes invalid.
event_queue: Weak<SyncEventQueue<DecoderEvent>>,
}
@ -32,6 +32,7 @@ pub enum Token {
}

/// A tag for commands being processed asynchronously in the back-end device.
///
/// TODO(b/149720783): Remove this enum by using async primitives.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
pub enum AsyncCmdTag {

@ -102,11 +103,13 @@ pub enum VideoEvtResponseType {

pub trait Device {
/// Processes a virtio-video command.
/// If the command expects a synchronous response, it returns a response as `VideoCmdResponseType::Sync`.
/// Otherwise, it returns a name of the descriptor chain that will be used when a response is prepared.
/// Implementations of this method is passed a WaitContext object which can be used to add or remove
/// descriptors to wait on. It is expected that only Token::Event items would be added. When a Token::Event
/// event arrives, process_event() will be invoked.
/// If the command expects a synchronous response, it returns a response as
/// `VideoCmdResponseType::Sync`. Otherwise, it returns a name of the descriptor chain that
/// will be used when a response is prepared. Implementations of this method is passed a
/// WaitContext object which can be used to add or remove descriptors to wait on. It is
/// expected that only Token::Event items would be added. When a Token::Event event arrives,
/// process_event() will be invoked.
///
/// TODO(b/149720783): Make this an async function.
fn process_cmd(
&mut self,

@ -119,8 +122,10 @@ pub trait Device {

/// Processes an available `Token::Event` event and returns a list of `VideoEvtResponseType`
/// responses. It returns None if an invalid event comes.
/// For responses to be sent via command queue, the return type is `VideoEvtResponseType::AsyncCmd`.
/// For responses to be sent via event queue, the return type is `VideoEvtResponseType::Event`.
/// For responses to be sent via command queue, the return type is
/// `VideoEvtResponseType::AsyncCmd`. For responses to be sent via event queue, the return
/// type is `VideoEvtResponseType::Event`.
///
/// TODO(b/149720783): Make this an async function.
fn process_event(
&mut self,

@ -105,9 +105,9 @@ pub struct FfmpegEncoderSession {
output_queue: VecDeque<(OutputBufferId, MemoryMappingArena)>,
/// `true` if a flush is pending. While a pending flush exist, input buffers are temporarily
/// held on and not sent to the encoder. An actual flush call will be issued when we run out of
/// output buffers (to defend against FFmpeg bugs), and we'll try to receive outputs again until
/// we receive another code indicating the flush has completed, at which point this flag will
/// be reset.
/// output buffers (to defend against FFmpeg bugs), and we'll try to receive outputs again
/// until we receive another code indicating the flush has completed, at which point this
/// flag will be reset.
is_flushing: bool,

/// The libav context for this session.

@ -121,7 +121,8 @@ impl LibvdaEncoder {
let mut parsed_formats: BTreeMap<Format, ParsedFormat> = BTreeMap::new();

for output_format in output_formats.iter() {
// TODO(alexlau): Consider using `max_framerate_numerator` and `max_framerate_denominator`.
// TODO(alexlau): Consider using `max_framerate_numerator` and
// `max_framerate_denominator`.
let libvda::encode::OutputProfile {
profile: libvda_profile,
max_width,

@ -671,8 +671,8 @@ impl<T: Encoder> EncoderDevice<T> {
GuestResource::from_virtio_object_entry(
// SAFETY:
// Safe because we confirmed the correct type for the resource.
// unwrap() is also safe here because we just tested above that `entries` had
// exactly one element.
// unwrap() is also safe here because we just tested above that
// `entries` had exactly one element.
unsafe { entries.get(0).unwrap().object },
&self.resource_bridge,
&stream.src_params,

@ -721,8 +721,8 @@ impl<T: Encoder> EncoderDevice<T> {
GuestResource::from_virtio_object_entry(
// SAFETY:
// Safe because we confirmed the correct type for the resource.
// unwrap() is also safe here because we just tested above that `entries` had
// exactly one element.
// unwrap() is also safe here because we just tested above that
// `entries` had exactly one element.
unsafe { entries.get(0).unwrap().object },
&self.resource_bridge,
&stream.dst_params,

@ -866,9 +866,9 @@ impl<T: Encoder> EncoderDevice<T> {
let buffer_size = dst_resource.resource.planes[0].size as u32;

// Stores an output buffer to notify EOS.
// This is necessary because libvda is unable to indicate EOS along with returned buffers.
// For now, when a `Flush()` completes, this saved resource will be returned as a zero-sized
// buffer with the EOS flag.
// This is necessary because libvda is unable to indicate EOS along with returned
// buffers. For now, when a `Flush()` completes, this saved resource
// will be returned as a zero-sized buffer with the EOS flag.
if stream.eos_manager.try_reserve_eos_buffer(resource_id) {
return Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
stream_id,
@ -186,8 +186,8 @@ impl PlaneFormat {
},
// UV plane, 1 sample per group of 4 pixels for U and V.
PlaneFormat {
// Add one vertical line so odd resolutions result in an extra UV line to cover all the
// Y samples.
// Add one vertical line so odd resolutions result in an extra UV line to cover
// all the Y samples.
plane_size: width * half_height,
stride: width,
},

@ -21,8 +21,8 @@
//! * Derive implementations of AsBytes and FromBytes for each struct as needed.
//! * Added GET_PARAMS_EXT and SET_PARAMS_EXT to allow querying and changing the resource type
//! dynamically.
//! * Moved some definitions such as virtio_video_config to device_constants to make them visible
//! to vhost-user modules, and also pub-use them.
//! * Moved some definitions such as virtio_video_config to device_constants to make them visible to
//! vhost-user modules, and also pub-use them.

#![allow(dead_code, non_snake_case, non_camel_case_types)]

@ -181,8 +181,8 @@ impl Worker {
/// # Arguments
///
/// * `device` - Instance of backend device
/// * `wait_ctx` - `device` may register a new `Token::Event` for a new stream session
/// to `wait_ctx`
/// * `wait_ctx` - `device` may register a new `Token::Event` for a new stream session to
/// `wait_ctx`
/// * `desc` - `DescriptorChain` to handle
fn handle_command_desc(
&mut self,

@ -266,8 +266,8 @@ impl Worker {
/// # Arguments
///
/// * `device` - Instance of backend device
/// * `wait_ctx` - `device` may register a new `Token::Event` for a new stream session
/// to `wait_ctx`
/// * `wait_ctx` - `device` may register a new `Token::Event` for a new stream session to
/// `wait_ctx`
fn handle_command_queue(
&mut self,
device: &mut dyn Device,

@ -286,8 +286,8 @@ impl Worker {
///
/// * `device` - Instance of backend device
/// * `stream_id` - Stream session ID of the event
/// * `wait_ctx` - `device` may register a new `Token::Buffer` for a new stream session
/// to `wait_ctx`
/// * `wait_ctx` - `device` may register a new `Token::Buffer` for a new stream session to
/// `wait_ctx`
fn handle_event(
&mut self,
device: &mut dyn Device,

@ -398,9 +398,9 @@ struct VsockConnection {
prev_recv_cnt: usize,

// Total auxiliary buffer space available to receive packets from the driver, not including
// the virtqueue itself. For us, this is tx buffer on the named pipe into which we drain packets
// for the connection. Note that if the named pipe has a grow on demand TX buffer, we use
// DEFAULT_BUF_ALLOC instead.
// the virtqueue itself. For us, this is tx buffer on the named pipe into which we drain
// packets for the connection. Note that if the named pipe has a grow on demand TX buffer,
// we use DEFAULT_BUF_ALLOC instead.
buf_alloc: usize,

// Peer (driver) total free-running count of received bytes.

@ -1155,7 +1155,8 @@ impl Worker {
fwd_cnt: 0.into(),
..Default::default()
};
// Safe because virtio_vsock_hdr is a simple data struct and converts cleanly to bytes
// Safe because virtio_vsock_hdr is a simple data struct and converts cleanly to
// bytes
self.write_bytes_to_queue(
&mut *send_queue.lock().await,
rx_queue_evt,

@ -59,17 +59,17 @@ const MAJOR_VERSION: u16 = 1;
#[repr(C)]
#[derive(Clone, Copy, Debug, AsBytes, FromZeroes, FromBytes)]
struct SparseHeader {
magic: Le32, /* SPARSE_HEADER_MAGIC */
major_version: Le16, /* (0x1) - reject images with higher major versions */
minor_version: Le16, /* (0x0) - allow images with higer minor versions */
file_hdr_sz: Le16, /* 28 bytes for first revision of the file format */
chunk_hdr_size: Le16, /* 12 bytes for first revision of the file format */
blk_sz: Le32, /* block size in bytes, must be a multiple of 4 (4096) */
total_blks: Le32, /* total blocks in the non-sparse output image */
total_chunks: Le32, /* total chunks in the sparse input image */
image_checksum: Le32, /* CRC32 checksum of the original data, counting "don't care" */
/* as 0. Standard 802.3 polynomial, use a Public Domain */
/* table implementation */
magic: Le32, // SPARSE_HEADER_MAGIC
major_version: Le16, // (0x1) - reject images with higher major versions
minor_version: Le16, // (0x0) - allow images with higer minor versions
file_hdr_sz: Le16, // 28 bytes for first revision of the file format
chunk_hdr_size: Le16, // 12 bytes for first revision of the file format
blk_sz: Le32, // block size in bytes, must be a multiple of 4 (4096)
total_blks: Le32, // total blocks in the non-sparse output image
total_chunks: Le32, // total chunks in the sparse input image
// CRC32 checksum of the original data, counting "don't care" as 0. Standard 802.3 polynomial,
// use a Public Domain table implementation
image_checksum: Le32,
}

const CHUNK_TYPE_RAW: u16 = 0xCAC1;
@ -219,7 +219,8 @@ impl CompositeDiskFile {
};
let comp_file = open_file_or_duplicate(
&path,
OpenOptions::new().read(true).write(writable), // TODO(b/190435784): add support for O_DIRECT.
OpenOptions::new().read(true).write(writable), /* TODO(b/190435784): add
* support for O_DIRECT. */
)
.map_err(|e| Error::OpenFile(e.into(), disk.file_path.to_string()))?;

@ -1302,8 +1303,8 @@ mod tests {
// Write to the RW part so that some fsync operation will occur.
composite.write_zeroes_at(0, 20).await.unwrap();

// This is the test's assert. fsyncing should NOT touch a read-only disk part. On Windows,
// this would be an error.
// This is the test's assert. fsyncing should NOT touch a read-only disk part. On
// Windows, this would be an error.
composite.fsync().await.expect(
"Failed to fsync composite disk. \
This can happen if the disk writable state is wrong.",

@ -25,7 +25,8 @@ pub enum Error {
/// `NeedCluster` - Handle this error by reading the cluster and calling the function again.
#[error("cluster with addr={0} needs to be read")]
NeedCluster(u64),
/// `NeedNewCluster` - Handle this error by allocating a cluster and calling the function again.
/// `NeedNewCluster` - Handle this error by allocating a cluster and calling the function
/// again.
#[error("new cluster needs to be allocated for refcounts")]
NeedNewCluster,
/// `ReadingRefCounts` - Error reading the file in to the refcount cache.

@ -96,8 +96,7 @@ impl TestVmSys {
}

// Adds 2 serial devices:
// - ttyS0: Console device which prints kernel log / debug output of the
// delegate binary.
// - ttyS0: Console device which prints kernel log / debug output of the delegate binary.
// - ttyS1: Serial device attached to the named pipes.
fn configure_serial_devices(
command: &mut Command,

@ -135,7 +135,8 @@ pub struct TestVmSys {
>,
>,
pub(crate) to_guest: Arc<Mutex<PipeConnection>>,
pub(crate) process: Option<Child>, // Use `Option` to allow taking the ownership in `Drop::drop()`.
pub(crate) process: Option<Child>, /* Use `Option` to allow taking the ownership in
* `Drop::drop()`. */
}

impl TestVmSys {

@ -148,8 +149,7 @@ impl TestVmSys {
}

// Adds 2 serial devices:
// - ttyS0: Console device which prints kernel log / debug output of the
// delegate binary.
// - ttyS0: Console device which prints kernel log / debug output of the delegate binary.
// - ttyS1: Serial device attached to the named pipes.
fn configure_serial_devices(
command: &mut Command,

@ -12,8 +12,8 @@ fn main() -> Result<()> {
Ok(())
}

// Fallback main function to make the library's serialize / deserialize implementation usable on the e2etest side
// (which may not be Linux environment).
// Fallback main function to make the library's serialize / deserialize implementation usable on the
// e2etest side (which may not be Linux environment).
// This workaround is needed due to cargo's dependency limitations.
// c.f. https://github.com/rust-lang/cargo/issues/1982
#[cfg(not(any(target_os = "linux", target_os = "android")))]

@ -49,10 +49,8 @@ fn copy_file() {

/// Tests file ownership seen by the VM.
///
/// 1. Create `user_file.txt` owned by the current user of the host on a
/// temporal directory.
/// 2. Set virtiofs options: uidmap=<mapped-uid> <current-uid> 1,
/// uid=<mapped-uid>.
/// 1. Create `user_file.txt` owned by the current user of the host on a temporal directory.
/// 2. Set virtiofs options: uidmap=<mapped-uid> <current-uid> 1, uid=<mapped-uid>.
/// 3. Start a VM with a virtiofs device for the temporal directory.
/// 4. Check that `user_file.txt`'s uid is <mapped-uid> in the VM.
/// 5. Verify gid similarly.

@ -15,8 +15,8 @@ use tempfile::TempDir;
/// Tests audio playback on virtio-snd with file backend
///
/// 1. Create a temporal directory for the audio file.
/// 2. Start a VM with a virtiofs device for the temporal directory
/// and a virtio-snd device with file backend.
/// 2. Start a VM with a virtiofs device for the temporal directory and a virtio-snd device with
/// file backend.
/// 3. Create a raw audio file in the temporal directory with sox.
/// 4. Do playback with aplay.
/// 5. Compare the generated audio file and the output from virtio-snd.
@ -77,9 +77,8 @@ impl Entry {
///
/// # Arguments
///
/// * `negative_timeout` - The duration for which this negative d_entry
/// should be considered valid. After the timeout expires, the d_entry
/// will be invalidated.
/// * `negative_timeout` - The duration for which this negative d_entry should be considered
/// valid. After the timeout expires, the d_entry will be invalidated.
///
/// # Returns
///

@ -101,8 +100,8 @@ impl Entry {
/// Represents information about an entry in a directory.
pub struct DirEntry<'a> {
/// The inode number for this entry. This does NOT have to be the same as the `Inode` for this
/// directory entry. However, it must be the same as the `attr.st_ino` field of the `Entry` that
/// would be returned by a `lookup` request in the parent directory for `name`.
/// directory entry. However, it must be the same as the `attr.st_ino` field of the `Entry`
/// that would be returned by a `lookup` request in the parent directory for `name`.
pub ino: libc::ino64_t,

/// Any non-zero value that the kernel can use to identify the current point in the directory

@ -127,8 +126,8 @@ pub enum GetxattrReply {

/// The size of the buffer needed to hold the value of the requested extended attribute. Should
/// be returned when the `size` parameter is 0. Callers should note that it is still possible
/// for the size of the value to change in between `getxattr` calls and should not assume that a
/// subsequent call to `getxattr` with the returned count will always succeed.
/// for the size of the value to change in between `getxattr` calls and should not assume that
/// a subsequent call to `getxattr` with the returned count will always succeed.
Count(u32),
}

@ -142,17 +141,17 @@ pub enum ListxattrReply {
/// This size of the buffer needed to hold the full list of extended attribute names associated
/// with this `Inode`. Should be returned when the `size` parameter is 0. Callers should note
/// that it is still possible for the set of extended attributes to change between `listxattr`
/// calls and so should not assume that a subsequent call to `listxattr` with the returned count
/// will always succeed.
/// calls and so should not assume that a subsequent call to `listxattr` with the returned
/// count will always succeed.
Count(u32),
}

/// A reply to an `ioctl` method call.
pub enum IoctlReply {
/// Indicates that the ioctl should be retried. This is only a valid reply when the `flags`
/// field of the ioctl request contains `IoctlFlags::UNRESTRICTED`. The kernel will read in data
/// and prepare output buffers as specified in the `input` and `output` fields before re-sending
/// the ioctl message.
/// field of the ioctl request contains `IoctlFlags::UNRESTRICTED`. The kernel will read in
/// data and prepare output buffers as specified in the `input` and `output` fields before
/// re-sending the ioctl message.
Retry {
/// Data that should be read by the kernel module and sent to the server when the ioctl is
/// retried.

@ -1030,7 +1029,6 @@ pub trait FileSystem {
///
/// The lookup count for `Inode`s associated with the returned directory entries is **NOT**
/// affected by this method.
///
fn readdir(
&self,
ctx: Context,
Some files were not shown because too many files have changed in this diff.