Make VolatileSlice ABI-compatible with iovec

Change VolatileSlice so that it is ABI-compatible with iovec.  This
allows us to pass a VolatileSlice directly to a C function that
expects an iovec, without having to create temporaries that convert
from one to the other.
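
As a rough standalone sketch of the layout guarantee this relies on
(hypothetical `Slice` type, assuming only the libc crate; not the
crosvm code itself): a #[repr(transparent)] wrapper around libc::iovec
has exactly the same ABI as iovec, so a slice of wrappers can be
reinterpreted as a slice of iovecs and handed straight to C:

    use libc::{c_void, iovec};

    // Hypothetical stand-in for VolatileSlice; #[repr(transparent)]
    // guarantees it has the same layout as its single field, an iovec.
    #[repr(transparent)]
    struct Slice {
        iov: iovec,
    }

    fn as_iovecs(slices: &[Slice]) -> &[iovec] {
        // Safe because #[repr(transparent)] makes Slice layout-identical
        // to iovec, so the pointer cast preserves validity.
        unsafe { std::slice::from_raw_parts(slices.as_ptr() as *const iovec, slices.len()) }
    }

    fn main() {
        let mut buf = [0u8; 16];
        let s = Slice {
            iov: iovec {
                iov_base: buf.as_mut_ptr() as *mut c_void,
                iov_len: buf.len(),
            },
        };
        let iovs = as_iovecs(std::slice::from_ref(&s));
        assert_eq!(iovs[0].iov_len, 16);
    }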

Also change all the parameters from u64 to usize.  It's not possible
to address more memory than fits in a usize, so using u64 here didn't
provide much benefit and led to a lot of tedious casting back and
forth all over the place.
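
For a concrete taste of what the usize change buys, here is a
self-contained sketch of the checked offset math the patch uses (error
type simplified to a String rather than the crate's actual
VolatileMemoryError):

    fn calc_offset(base: usize, offset: usize) -> Result<usize, String> {
        // checked_add catches overflow directly in usize, with no
        // u64 <-> usize casts at the call sites.
        base.checked_add(offset)
            .ok_or_else(|| format!("overflow: base={} offset={}", base, offset))
    }

    fn main() {
        assert_eq!(calc_offset(16, 16), Ok(32));
        assert!(calc_offset(usize::MAX, 1).is_err());
    }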

BUG=none
TEST=unit tests

Cq-Depend: chromium:2206621
Change-Id: I258f9123c603d9a4c6c5e2d4d10eb4aedf74466d
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2203998
Tested-by: Chirantan Ekbote <chirantan@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Commit-Queue: Chirantan Ekbote <chirantan@chromium.org>
Chirantan Ekbote 2020-05-15 20:06:58 +09:00, committed by Commit Bot
parent be5824412c
commit e7d1221c9d
19 changed files with 470 additions and 377 deletions

Cargo.lock (generated)

@ -161,6 +161,7 @@ name = "data_model"
version = "0.1.0"
dependencies = [
"assertions 0.1.0",
"libc 0.2.44 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]


@ -7,5 +7,6 @@ include = ["src/**/*", "Cargo.toml"]
[dependencies]
assertions = { path = "../assertions" } # provided by ebuild
libc = "*"
[workspace]


@ -20,21 +20,25 @@
//! not reordered or elided the access.
use std::cmp::min;
use std::fmt::{self, Display};
use std::ffi::c_void;
use std::fmt::{self, Debug, Display};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::{copy, null_mut, read_volatile, write_bytes, write_volatile};
use std::result;
use std::slice;
use std::usize;
use libc::iovec;
use crate::DataInit;
#[derive(Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
/// `addr` is out of bounds of the volatile memory slice.
OutOfBounds { addr: u64 },
/// Taking a slice at `base` with `offset` would overflow `u64`.
Overflow { base: u64, offset: u64 },
OutOfBounds { addr: usize },
/// Taking a slice at `base` with `offset` would overflow `usize`.
Overflow { base: usize, offset: usize },
}
impl Display for VolatileMemoryError {
@ -65,7 +69,7 @@ type Result<T> = VolatileMemoryResult<T>;
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: u64, count: u64) -> VolatileMemoryResult<()> {
/// # fn get_slice(offset: usize, count: usize) -> VolatileMemoryResult<()> {
/// let mem_end = calc_offset(offset, count)?;
/// if mem_end > 100 {
/// return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
@ -73,7 +77,7 @@ type Result<T> = VolatileMemoryResult<T>;
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: u64, offset: u64) -> Result<u64> {
pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
match base.checked_add(offset) {
None => Err(Error::Overflow { base, offset }),
Some(m) => Ok(m),
@ -84,109 +88,149 @@ pub fn calc_offset(base: u64, offset: u64) -> Result<u64> {
pub trait VolatileMemory {
/// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
/// access.
fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice>;
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
/// Gets a `VolatileRef` at `offset`.
fn get_ref<T: DataInit>(&self, offset: u64) -> Result<VolatileRef<T>> {
let slice = self.get_slice(offset, size_of::<T>() as u64)?;
fn get_ref<T: DataInit>(&self, offset: usize) -> Result<VolatileRef<T>> {
let slice = self.get_slice(offset, size_of::<T>())?;
Ok(VolatileRef {
addr: slice.addr as *mut T,
addr: slice.as_mut_ptr() as *mut T,
phantom: PhantomData,
})
}
}
impl<'a> VolatileMemory for &'a mut [u8] {
fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
let mem_end = calc_offset(offset, count)?;
if mem_end > self.len() as u64 {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(unsafe { VolatileSlice::new((self.as_ptr() as u64 + offset) as *mut _, count) })
}
}
/// A slice of raw memory that supports volatile access.
#[derive(Copy, Clone, Debug)]
/// A slice of raw memory that supports volatile access. Like `std::io::IoSliceMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoSliceMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone)]
#[repr(transparent)]
pub struct VolatileSlice<'a> {
addr: *mut u8,
size: u64,
phantom: PhantomData<&'a u8>,
iov: iovec,
phantom: PhantomData<&'a mut [u8]>,
}
impl<'a> Default for VolatileSlice<'a> {
fn default() -> VolatileSlice<'a> {
VolatileSlice {
addr: null_mut(),
size: 0,
iov: iovec {
iov_base: null_mut(),
iov_len: 0,
},
phantom: PhantomData,
}
}
}
struct DebugIovec(iovec);
impl Debug for DebugIovec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("iovec")
.field("iov_base", &self.0.iov_base)
.field("iov_len", &self.0.iov_len)
.finish()
}
}
impl<'a> Debug for VolatileSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("VolatileSlice")
.field("iov", &DebugIovec(self.iov))
.field("phantom", &self.phantom)
.finish()
}
}
impl<'a> VolatileSlice<'a> {
/// Creates a slice of raw memory that must support volatile access.
///
/// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
/// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
/// must also guarantee that all other users of the given chunk of memory are using volatile
/// accesses.
pub unsafe fn new(addr: *mut u8, size: u64) -> VolatileSlice<'a> {
pub fn new(buf: &mut [u8]) -> VolatileSlice {
VolatileSlice {
addr,
size,
iov: iovec {
iov_base: buf.as_mut_ptr() as *mut c_void,
iov_len: buf.len(),
},
phantom: PhantomData,
}
}
/// Gets the address of this slice's memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
/// Creates a `VolatileSlice` from a pointer and a length.
///
/// # Safety
///
/// In order to use this method safely, `addr` must be valid for reads and writes of `len` bytes
/// and should live for the entire duration of lifetime `'a`.
pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
VolatileSlice {
iov: iovec {
iov_base: addr as *mut c_void,
iov_len: len,
},
phantom: PhantomData,
}
}
/// Gets a const pointer to this slice's memory.
pub fn as_ptr(&self) -> *const u8 {
self.iov.iov_base as *const u8
}
/// Gets a mutable pointer to this slice's memory.
pub fn as_mut_ptr(&self) -> *mut u8 {
self.iov.iov_base as *mut u8
}
/// Gets the size of this slice.
pub fn size(&self) -> u64 {
self.size
pub fn size(&self) -> usize {
self.iov.iov_len
}
/// Returns this `VolatileSlice` as an iovec.
pub fn as_iovec(&self) -> iovec {
self.iov
}
/// Converts a slice of `VolatileSlice`s into a slice of `iovec`s.
pub fn as_iovecs<'slice>(iovs: &'slice [VolatileSlice<'_>]) -> &'slice [iovec] {
// Safe because `VolatileSlice` is ABI-compatible with `iovec`.
unsafe { slice::from_raw_parts(iovs.as_ptr() as *const iovec, iovs.len()) }
}
/// Creates a copy of this slice with the address increased by `count` bytes, and the size
/// reduced by `count` bytes.
pub fn offset(self, count: u64) -> Result<VolatileSlice<'a>> {
let new_addr =
(self.addr as u64)
.checked_add(count)
.ok_or(VolatileMemoryError::Overflow {
base: self.addr as u64,
offset: count,
})?;
if new_addr > usize::MAX as u64 {
return Err(VolatileMemoryError::Overflow {
base: self.addr as u64,
pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
VolatileMemoryError::Overflow {
base: self.as_mut_ptr() as usize,
offset: count,
});
}
},
)?;
let new_size = self
.size
.size()
.checked_sub(count)
.ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;
// Safe because the memory has the same lifetime and points to a subset of the memory of the
// original slice.
unsafe { Ok(VolatileSlice::new(new_addr as *mut u8, new_size)) }
unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
}
/// Similar to `get_slice` but the returned slice outlives this slice.
///
/// The returned slice's lifetime is still limited by the underlying data's lifetime.
pub fn sub_slice(self, offset: u64, count: u64) -> Result<VolatileSlice<'a>> {
pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
let mem_end = calc_offset(offset, count)?;
if mem_end > self.size {
if mem_end > self.size() {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(VolatileSlice {
addr: (self.addr as u64 + offset) as *mut _,
size: count,
phantom: PhantomData,
})
let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
VolatileMemoryError::Overflow {
base: self.as_mut_ptr() as usize,
offset,
},
)?;
// Safe because we have verified that the new memory is a subset of the original slice.
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
}
/// Sets each byte of this slice with the given byte, similar to `memset`.
@ -196,13 +240,12 @@ impl<'a> VolatileSlice<'a> {
/// # Examples
///
/// ```
/// # use data_model::VolatileMemory;
/// # use data_model::VolatileSlice;
/// # fn test_write_45() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let vslice = VolatileSlice::new(&mut mem[..]);
/// vslice.write_bytes(45);
/// for &mut v in mem_ref {
/// for &v in &mem[..] {
/// assert_eq!(v, 45);
/// }
/// # Ok(())
@ -210,7 +253,7 @@ impl<'a> VolatileSlice<'a> {
pub fn write_bytes(&self, value: u8) {
// Safe because the memory is valid and needs only byte alignment.
unsafe {
write_bytes(self.as_ptr(), value, self.size as usize);
write_bytes(self.as_mut_ptr(), value, self.size());
}
}
@ -224,11 +267,10 @@ impl<'a> VolatileSlice<'a> {
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use data_model::VolatileMemory;
/// # use data_model::VolatileSlice;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let vslice = VolatileSlice::new(&mut mem[..]);
/// let mut buf = [5u8; 16];
/// vslice.copy_to(&mut buf[..]);
/// for v in &buf[..] {
@ -241,8 +283,8 @@ impl<'a> VolatileSlice<'a> {
where
T: DataInit,
{
let mut addr = self.addr;
for v in buf.iter_mut().take(self.size as usize / size_of::<T>()) {
let mut addr = self.as_mut_ptr() as *const u8;
for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
unsafe {
*v = read_volatile(addr as *const T);
addr = addr.add(size_of::<T>());
@ -256,18 +298,21 @@ impl<'a> VolatileSlice<'a> {
/// # Examples
///
/// ```
/// # use data_model::VolatileMemory;
/// # use data_model::{VolatileMemory, VolatileSlice};
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let vslice = VolatileSlice::new(&mut mem[..]);
/// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
/// # Ok(())
/// # }
/// ```
pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
unsafe {
copy(self.addr, slice.addr, min(self.size, slice.size) as usize);
copy(
self.as_mut_ptr() as *const u8,
slice.as_mut_ptr(),
min(self.size(), slice.size()),
);
}
}
@ -281,11 +326,10 @@ impl<'a> VolatileSlice<'a> {
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use data_model::VolatileMemory;
/// # use data_model::{VolatileMemory, VolatileSlice};
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let vslice = VolatileSlice::new(&mut mem[..]);
/// let buf = [5u8; 64];
/// vslice.copy_from(&buf[..]);
/// for i in 0..4 {
@ -298,8 +342,8 @@ impl<'a> VolatileSlice<'a> {
where
T: DataInit,
{
let mut addr = self.addr;
for &v in buf.iter().take(self.size as usize / size_of::<T>()) {
let mut addr = self.as_mut_ptr();
for &v in buf.iter().take(self.size() / size_of::<T>()) {
unsafe {
write_volatile(addr as *mut T, v);
addr = addr.add(size_of::<T>());
@ -309,16 +353,8 @@ impl<'a> VolatileSlice<'a> {
}
impl<'a> VolatileMemory for VolatileSlice<'a> {
fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
let mem_end = calc_offset(offset, count)?;
if mem_end > self.size {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(VolatileSlice {
addr: (self.addr as u64 + offset) as *mut _,
size: count,
phantom: PhantomData,
})
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
self.sub_slice(offset, count)
}
}
@ -358,7 +394,7 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
}
/// Gets the address of this slice's memory.
pub fn as_ptr(&self) -> *mut T {
pub fn as_mut_ptr(&self) -> *mut T {
self.addr
}
@ -370,10 +406,10 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
/// # use std::mem::size_of;
/// # use data_model::VolatileRef;
/// let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
/// assert_eq!(v_ref.size(), size_of::<u32>() as u64);
/// assert_eq!(v_ref.size(), size_of::<u32>());
/// ```
pub fn size(&self) -> u64 {
size_of::<T>() as u64
pub fn size(&self) -> usize {
size_of::<T>()
}
/// Does a volatile write of the value `v` to the address of this ref.
@ -393,7 +429,7 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
/// Converts this `T` reference to a raw slice with the same size and address.
pub fn to_slice(&self) -> VolatileSlice<'a> {
unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>() as u64) }
unsafe { VolatileSlice::from_raw_parts(self.as_mut_ptr() as *mut u8, self.size()) }
}
}
@ -418,19 +454,27 @@ mod tests {
}
impl VolatileMemory for VecMem {
fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let mem_end = calc_offset(offset, count)?;
if mem_end > self.mem.len() as u64 {
if mem_end > self.mem.len() {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(unsafe { VolatileSlice::new((self.mem.as_ptr() as u64 + offset) as *mut _, count) })
let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
VolatileMemoryError::Overflow {
base: self.mem.as_ptr() as usize,
offset,
},
)?;
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
}
}
#[test]
fn ref_store() {
let mut a = [0u8; 1];
let a_ref = &mut a[..];
let a_ref = VolatileSlice::new(&mut a[..]);
let v_ref = a_ref.get_ref(0).unwrap();
v_ref.store(2u8);
assert_eq!(a[0], 2);
@ -440,7 +484,7 @@ mod tests {
fn ref_load() {
let mut a = [5u8; 1];
{
let a_ref = &mut a[..];
let a_ref = VolatileSlice::new(&mut a[..]);
let c = {
let v_ref = a_ref.get_ref::<u8>(0).unwrap();
assert_eq!(v_ref.load(), 5u8);
@ -457,11 +501,11 @@ mod tests {
#[test]
fn ref_to_slice() {
let mut a = [1u8; 5];
let a_ref = &mut a[..];
let a_ref = VolatileSlice::new(&mut a[..]);
let v_ref = a_ref.get_ref(1).unwrap();
v_ref.store(0x12345678u32);
let ref_slice = v_ref.to_slice();
assert_eq!(v_ref.as_ptr() as u64, ref_slice.as_ptr() as u64);
assert_eq!(v_ref.as_mut_ptr() as usize, ref_slice.as_mut_ptr() as usize);
assert_eq!(v_ref.size(), ref_slice.size());
}
@ -506,7 +550,7 @@ mod tests {
#[test]
fn slice_overflow_error() {
use std::u64::MAX;
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_slice(MAX, 1).unwrap_err();
assert_eq!(
@ -528,7 +572,7 @@ mod tests {
#[test]
fn ref_overflow_error() {
use std::u64::MAX;
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_ref::<u8>(MAX).unwrap_err();
assert_eq!(


@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::borrow::Cow;
use std::cmp;
use std::convert::TryInto;
use std::ffi::c_void;
use std::fmt::{self, Display};
use std::io::{self, Read, Write};
use std::iter::FromIterator;
@ -13,7 +13,7 @@ use std::mem::{size_of, MaybeUninit};
use std::ptr::copy_nonoverlapping;
use std::result;
use data_model::{DataInit, Le16, Le32, Le64, VolatileMemory, VolatileMemoryError, VolatileSlice};
use data_model::{DataInit, Le16, Le32, Le64, VolatileMemoryError, VolatileSlice};
use sys_util::{
FileReadWriteAtVolatile, FileReadWriteVolatile, GuestAddress, GuestMemory, IntoIovec,
};
@ -54,10 +54,9 @@ impl std::error::Error for Error {}
#[derive(Clone)]
struct DescriptorChainConsumer<'a> {
buffers: Vec<libc::iovec>,
buffers: Vec<VolatileSlice<'a>>,
current: usize,
bytes_consumed: usize,
mem: PhantomData<&'a GuestMemory>,
}
impl<'a> DescriptorChainConsumer<'a> {
@ -67,7 +66,7 @@ impl<'a> DescriptorChainConsumer<'a> {
// `Reader::new()` and `Writer::new()`).
self.get_remaining()
.iter()
.fold(0usize, |count, buf| count + buf.iov_len)
.fold(0usize, |count, buf| count + buf.size())
}
fn bytes_consumed(&self) -> usize {
@ -78,10 +77,38 @@ impl<'a> DescriptorChainConsumer<'a> {
/// consume any bytes from the `DescriptorChain`. Instead callers should use the `consume`
/// method to advance the `DescriptorChain`. Multiple calls to `get` with no intervening calls
/// to `consume` will return the same data.
fn get_remaining(&self) -> &[libc::iovec] {
fn get_remaining(&self) -> &[VolatileSlice] {
&self.buffers[self.current..]
}
/// Like `get_remaining` but guarantees that the combined length of all the returned iovecs is
/// not greater than `count`. The combined length of the returned iovecs may be less than
/// `count` but will always be greater than 0 as long as there is still space left in the
/// `DescriptorChain`.
fn get_remaining_with_count(&self, count: usize) -> Cow<[VolatileSlice]> {
let iovs = self.get_remaining();
let mut iov_count = 0;
let mut rem = count;
for iov in iovs {
if rem < iov.size() {
break;
}
iov_count += 1;
rem -= iov.size();
}
// Special case where the number of bytes to be copied is smaller than the `size()` of the
// first iovec.
if iov_count == 0 && iovs.len() > 0 && count > 0 {
debug_assert!(count < iovs[0].size());
// Safe because we know that count is smaller than the length of the first slice.
Cow::Owned(vec![iovs[0].sub_slice(0, count).unwrap()])
} else {
Cow::Borrowed(&iovs[..iov_count])
}
}
/// Consumes `count` bytes from the `DescriptorChain`. If `count` is larger than
/// `self.available_bytes()` then all remaining bytes in the `DescriptorChain` will be consumed.
///
@ -99,19 +126,18 @@ impl<'a> DescriptorChainConsumer<'a> {
break;
}
let consumed = if count < buf.iov_len {
let consumed = if count < buf.size() {
// Safe because we know that the iovec pointed to valid memory and we are adding a
// value that is smaller than the length of the memory.
buf.iov_base = unsafe { (buf.iov_base as *mut u8).add(count) as *mut c_void };
buf.iov_len -= count;
*buf = buf.offset(count).unwrap();
count
} else {
self.current += 1;
buf.iov_len
buf.size()
};
// This shouldn't overflow because `consumed <= buf.iov_len` and we already verified
// that adding all `buf.iov_len` values will not overflow when the Reader/Writer was
// This shouldn't overflow because `consumed <= buf.size()` and we already verified
// that adding all `buf.size()` values will not overflow when the Reader/Writer was
// constructed.
self.bytes_consumed += consumed;
count -= consumed;
@ -126,13 +152,14 @@ impl<'a> DescriptorChainConsumer<'a> {
let mut rem = offset;
let mut end = self.current;
for buf in &mut self.buffers[self.current..] {
if rem < buf.iov_len {
buf.iov_len = rem;
if rem < buf.size() {
// Safe because we are creating a smaller sub-slice.
*buf = buf.sub_slice(0, rem).unwrap();
break;
}
end += 1;
rem -= buf.iov_len;
rem -= buf.size();
}
self.buffers.truncate(end + 1);
@ -140,51 +167,16 @@ impl<'a> DescriptorChainConsumer<'a> {
other
}
// Temporary method for converting iovecs into VolatileSlices until we can change the
// ReadWriteVolatile traits. The irony here is that the standard implementation of the
// ReadWriteVolatile traits will convert the VolatileSlices back into iovecs.
fn get_volatile_slices(&mut self, mut count: usize) -> Vec<VolatileSlice> {
let bufs = self.get_remaining();
let mut iovs = Vec::with_capacity(bufs.len());
for b in bufs {
// Safe because we verified during construction that the memory at `b.iov_base` is
// `b.iov_len` bytes long. The lifetime of the `VolatileSlice` is tied to the lifetime
// of this `DescriptorChainConsumer`, which is in turn tied to the lifetime of the
// `GuestMemory` used to create it and so the memory will be available for the duration
// of the `VolatileSlice`.
let iov = unsafe {
if count < b.iov_len {
VolatileSlice::new(
b.iov_base as *mut u8,
count.try_into().expect("usize doesn't fit in u64"),
)
} else {
VolatileSlice::new(
b.iov_base as *mut u8,
b.iov_len.try_into().expect("usize doesn't fit in u64"),
)
}
};
count -= iov.size() as usize;
iovs.push(iov);
}
iovs
}
fn get_iovec(&mut self, len: usize) -> io::Result<DescriptorIovec<'a>> {
let mut iovec = Vec::with_capacity(self.get_remaining().len());
let mut rem = len;
for buf in self.get_remaining() {
let iov = if rem < buf.iov_len {
libc::iovec {
iov_base: buf.iov_base,
iov_len: rem,
}
let iov = if rem < buf.size() {
// Safe because we know that `rem` is in-bounds.
buf.sub_slice(0, rem).unwrap().as_iovec()
} else {
buf.clone()
buf.as_iovec()
};
rem -= iov.iov_len;
@ -249,21 +241,18 @@ impl<'a> Reader<'a> {
.checked_add(desc.len as usize)
.ok_or(Error::DescriptorChainOverflow)?;
let vs = mem
.get_slice(desc.addr.offset(), desc.len.into())
.map_err(Error::VolatileMemoryError)?;
Ok(libc::iovec {
iov_base: vs.as_ptr() as *mut c_void,
iov_len: vs.size() as usize,
})
mem.get_slice_at_addr(
desc.addr,
desc.len.try_into().expect("u32 doesn't fit in usize"),
)
.map_err(Error::GuestMemoryError)
})
.collect::<Result<Vec<libc::iovec>>>()?;
.collect::<Result<Vec<VolatileSlice>>>()?;
Ok(Reader {
buffer: DescriptorChainConsumer {
buffers,
current: 0,
bytes_consumed: 0,
mem: PhantomData,
},
})
}
@ -311,7 +300,7 @@ impl<'a> Reader<'a> {
mut dst: F,
count: usize,
) -> io::Result<usize> {
let iovs = self.buffer.get_volatile_slices(count);
let iovs = self.buffer.get_remaining_with_count(count);
let written = dst.write_vectored_volatile(&iovs[..])?;
self.buffer.consume(written);
Ok(written)
@ -327,7 +316,7 @@ impl<'a> Reader<'a> {
count: usize,
off: u64,
) -> io::Result<usize> {
let iovs = self.buffer.get_volatile_slices(count);
let iovs = self.buffer.get_remaining_with_count(count);
let written = dst.write_vectored_at_volatile(&iovs[..], off)?;
self.buffer.consume(written);
Ok(written)
@ -418,11 +407,11 @@ impl<'a> io::Read for Reader<'a> {
break;
}
let count = cmp::min(rem.len(), b.iov_len);
let count = cmp::min(rem.len(), b.size());
// Safe because we have already verified that `b` points to valid memory.
unsafe {
copy_nonoverlapping(b.iov_base as *const u8, rem.as_mut_ptr(), count);
copy_nonoverlapping(b.as_ptr(), rem.as_mut_ptr(), count);
}
rem = &mut rem[count..];
total += count;
@ -460,21 +449,18 @@ impl<'a> Writer<'a> {
.checked_add(desc.len as usize)
.ok_or(Error::DescriptorChainOverflow)?;
let vs = mem
.get_slice(desc.addr.offset(), desc.len.into())
.map_err(Error::VolatileMemoryError)?;
Ok(libc::iovec {
iov_base: vs.as_ptr() as *mut c_void,
iov_len: vs.size() as usize,
})
mem.get_slice_at_addr(
desc.addr,
desc.len.try_into().expect("u32 doesn't fit in usize"),
)
.map_err(Error::GuestMemoryError)
})
.collect::<Result<Vec<libc::iovec>>>()?;
.collect::<Result<Vec<VolatileSlice>>>()?;
Ok(Writer {
buffer: DescriptorChainConsumer {
buffers,
current: 0,
bytes_consumed: 0,
mem: PhantomData,
},
})
}
@ -512,7 +498,7 @@ impl<'a> Writer<'a> {
mut src: F,
count: usize,
) -> io::Result<usize> {
let iovs = self.buffer.get_volatile_slices(count);
let iovs = self.buffer.get_remaining_with_count(count);
let read = src.read_vectored_volatile(&iovs[..])?;
self.buffer.consume(read);
Ok(read)
@ -528,7 +514,7 @@ impl<'a> Writer<'a> {
count: usize,
off: u64,
) -> io::Result<usize> {
let iovs = self.buffer.get_volatile_slices(count);
let iovs = self.buffer.get_remaining_with_count(count);
let read = src.read_vectored_at_volatile(&iovs[..], off)?;
self.buffer.consume(read);
Ok(read)
@ -612,10 +598,10 @@ impl<'a> io::Write for Writer<'a> {
break;
}
let count = cmp::min(rem.len(), b.iov_len);
let count = cmp::min(rem.len(), b.size());
// Safe because we have already verified that `vs` points to valid memory.
unsafe {
copy_nonoverlapping(rem.as_ptr(), b.iov_base as *mut u8, count);
copy_nonoverlapping(rem.as_ptr(), b.as_mut_ptr(), count);
}
rem = &rem[count..];
total += count;
@ -1266,4 +1252,59 @@ mod tests {
.expect("failed to collect() values");
assert_eq!(vs, vs_read);
}
#[test]
fn get_remaining_with_count() {
use DescriptorType::*;
let memory_start_addr = GuestAddress(0x0);
let memory = GuestMemory::new(&vec![(memory_start_addr, 0x10000)]).unwrap();
let chain = create_descriptor_chain(
&memory,
GuestAddress(0x0),
GuestAddress(0x100),
vec![
(Readable, 16),
(Readable, 16),
(Readable, 96),
(Writable, 64),
(Writable, 1),
(Writable, 3),
],
0,
)
.expect("create_descriptor_chain failed");
let Reader { mut buffer } = Reader::new(&memory, chain).expect("failed to create Reader");
let drain = buffer
.get_remaining_with_count(::std::usize::MAX)
.iter()
.fold(0usize, |total, iov| total + iov.size());
assert_eq!(drain, 128);
let exact = buffer
.get_remaining_with_count(32)
.iter()
.fold(0usize, |total, iov| total + iov.size());
assert!(exact > 0);
assert!(exact <= 32);
let split = buffer
.get_remaining_with_count(24)
.iter()
.fold(0usize, |total, iov| total + iov.size());
assert!(split > 0);
assert!(split <= 24);
buffer.consume(64);
let first = buffer
.get_remaining_with_count(8)
.iter()
.fold(0usize, |total, iov| total + iov.size());
assert!(first > 0);
assert!(first <= 8);
}
}


@ -199,7 +199,7 @@ pub fn transfer<'a, S: Iterator<Item = VolatileSlice<'a>>>(
}
let src_subslice = src
.get_slice(offset_within_src, copyable_size)
.get_slice(offset_within_src as usize, copyable_size as usize)
.map_err(|e| Error::MemCopy(e))?;
let dst_line_vertical_offset = checked_arithmetic!(current_height * dst_stride)?;
@ -210,7 +210,7 @@ pub fn transfer<'a, S: Iterator<Item = VolatileSlice<'a>>>(
let dst_start_offset = checked_arithmetic!(dst_resource_offset + dst_line_offset)?;
let dst_subslice = dst
.get_slice(dst_start_offset, copyable_size)
.get_slice(dst_start_offset as usize, copyable_size as usize)
.map_err(|e| Error::MemCopy(e))?;
src_subslice.copy_to_volatile_slice(dst_subslice);
@ -246,7 +246,7 @@ impl Virtio2DResource {
) -> bool {
if iovecs
.iter()
.any(|&(addr, len)| mem.get_slice(addr.offset(), len as u64).is_err())
.any(|&(addr, len)| mem.get_slice_at_addr(addr, len).is_err())
{
return false;
}
@ -303,20 +303,18 @@ impl VirtioResource for Virtio2DResource {
if self
.guest_iovecs
.iter()
.any(|&(addr, len)| guest_mem.get_slice(addr.offset(), len as u64).is_err())
.any(|&(addr, len)| guest_mem.get_slice_at_addr(addr, len).is_err())
{
error!("failed to write to resource: invalid iovec attached");
return;
}
let mut src_slices = Vec::new();
for (addr, len) in &self.guest_iovecs {
let mut src_slices = Vec::with_capacity(self.guest_iovecs.len());
for &(addr, len) in &self.guest_iovecs {
// Unwrap will not panic because we already checked the slices.
src_slices.push(guest_mem.get_slice(addr.offset(), *len as u64).unwrap());
src_slices.push(guest_mem.get_slice_at_addr(addr, len).unwrap());
}
let host_mem_len = self.host_mem.len() as u64;
let src_stride = self.host_mem_stride;
let src_offset = src_offset;
@ -332,10 +330,7 @@ impl VirtioResource for Virtio2DResource {
height,
dst_stride,
dst_offset,
self.host_mem
.as_mut_slice()
.get_slice(0, host_mem_len)
.unwrap(),
VolatileSlice::new(self.host_mem.as_mut_slice()),
src_stride,
src_offset,
src_slices.iter().cloned(),
@ -359,8 +354,6 @@ impl VirtioResource for Virtio2DResource {
let dst_offset = 0;
let host_mem_len = self.host_mem.len() as u64;
if let Err(e) = transfer(
self.width(),
self.height(),
@ -373,13 +366,9 @@ impl VirtioResource for Virtio2DResource {
dst,
src_stride,
src_offset,
[self
.host_mem
.as_mut_slice()
.get_slice(0, host_mem_len)
.unwrap()]
.iter()
.cloned(),
[VolatileSlice::new(self.host_mem.as_mut_slice())]
.iter()
.cloned(),
) {
error!("failed to read from resource: {}", e);
}


@ -291,9 +291,9 @@ impl FileReadWriteAtVolatile for AndroidSparse {
))?;
let chunk_offset = offset - chunk_start;
let chunk_size = *expanded_size;
let subslice = if chunk_offset + slice.size() > chunk_size {
let subslice = if chunk_offset + (slice.size() as u64) > chunk_size {
slice
.sub_slice(0, chunk_size - chunk_offset)
.sub_slice(0, (chunk_size - chunk_offset) as usize)
.map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))?
} else {
slice
@ -331,7 +331,6 @@ impl FileReadWriteAtVolatile for AndroidSparse {
#[cfg(test)]
mod tests {
use super::*;
use data_model::VolatileMemory;
use std::io::{Cursor, Write};
use sys_util::SharedMemory;
@ -435,12 +434,11 @@ mod tests {
}];
let mut image = test_image(chunks);
let mut input_memory = [55u8; 100];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 100).unwrap(), 0)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![0u8; 100]);
let expected = [0u8; 100];
assert_eq!(&expected[..], &input_memory[..]);
}
#[test]
@ -451,12 +449,11 @@ mod tests {
}];
let mut image = test_image(chunks);
let mut input_memory = [55u8; 8];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 8).unwrap(), 0)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![10, 20, 10, 20, 10, 20, 10, 20]);
let expected = [10, 20, 10, 20, 10, 20, 10, 20];
assert_eq!(&expected[..], &input_memory[..]);
}
#[test]
@ -467,12 +464,11 @@ mod tests {
}];
let mut image = test_image(chunks);
let mut input_memory = [55u8; 6];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 6).unwrap(), 1)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![20, 30, 10, 20, 30, 10]);
let expected = [20, 30, 10, 20, 30, 10];
assert_eq!(&expected[..], &input_memory[..]);
}
#[test]
@ -489,12 +485,11 @@ mod tests {
];
let mut image = test_image(chunks);
let mut input_memory = [55u8; 7];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 7).unwrap(), 39)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![20, 30, 10, 20, 30, 10, 20]);
let expected = [20, 30, 10, 20, 30, 10, 20];
assert_eq!(&expected[..], &input_memory[..]);
}
#[test]
@ -506,12 +501,11 @@ mod tests {
let mut image = test_image(chunks);
write!(image.file, "hello").expect("Failed to write into internal file");
let mut input_memory = [55u8; 5];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 5).unwrap(), 0)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![104, 101, 108, 108, 111]);
let expected = [104, 101, 108, 108, 111];
assert_eq!(&expected[..], &input_memory[..]);
}
#[test]
@ -528,11 +522,10 @@ mod tests {
];
let mut image = test_image(chunks);
let mut input_memory = [55u8; 8];
let input_volatile_memory = &mut input_memory[..];
image
.read_exact_at_volatile(input_volatile_memory.get_slice(0, 8).unwrap(), 0)
.read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0)
.expect("Could not read");
let input_vec: Vec<u8> = input_memory.into_iter().cloned().collect();
assert_eq!(input_vec, vec![10, 20, 10, 20, 30, 40, 30, 40]);
let expected = [10, 20, 10, 20, 30, 40, 30, 40];
assert_eq!(&expected[..], &input_memory[..]);
}
}


@ -1090,8 +1090,7 @@ impl QcowFile {
let cluster_size = self.raw_file.cluster_size();
let cluster_begin = address - (address % cluster_size);
let mut cluster_data = vec![0u8; cluster_size as usize];
let raw_slice = cluster_data.as_mut_slice();
let volatile_slice = raw_slice.get_slice(0, cluster_size).unwrap();
let volatile_slice = VolatileSlice::new(&mut cluster_data);
backing.read_exact_at_volatile(volatile_slice, cluster_begin)?;
Some(cluster_data)
} else {
@ -1537,12 +1536,13 @@ impl AsRawFds for QcowFile {
impl Read for QcowFile {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let slice = buf.get_slice(0, buf.len() as u64).unwrap();
let len = buf.len();
let slice = VolatileSlice::new(buf);
let read_count = self.read_cb(
self.current_offset,
buf.len(),
len,
|file, already_read, offset, count| {
let sub_slice = slice.get_slice(already_read as u64, count as u64).unwrap();
let sub_slice = slice.get_slice(already_read, count).unwrap();
match file {
Some(f) => f.read_exact_at_volatile(sub_slice, offset),
None => {
@ -1610,9 +1610,9 @@ impl FileReadWriteVolatile for QcowFile {
fn read_volatile(&mut self, slice: VolatileSlice) -> io::Result<usize> {
let read_count = self.read_cb(
self.current_offset,
slice.size() as usize,
slice.size(),
|file, read, offset, count| {
let sub_slice = slice.get_slice(read as u64, count as u64).unwrap();
let sub_slice = slice.get_slice(read, count).unwrap();
match file {
Some(f) => f.read_exact_at_volatile(sub_slice, offset),
None => {
@ -1627,14 +1627,11 @@ impl FileReadWriteVolatile for QcowFile {
}
fn write_volatile(&mut self, slice: VolatileSlice) -> io::Result<usize> {
let write_count = self.write_cb(
self.current_offset,
slice.size() as usize,
|file, offset, count| {
let sub_slice = slice.get_slice(offset as u64, count as u64).unwrap();
let write_count =
self.write_cb(self.current_offset, slice.size(), |file, offset, count| {
let sub_slice = slice.get_slice(offset, count).unwrap();
file.write_all_volatile(sub_slice)
},
)?;
})?;
self.current_offset += write_count as u64;
Ok(write_count)
}
@ -1642,25 +1639,21 @@ impl FileReadWriteVolatile for QcowFile {
impl FileReadWriteAtVolatile for QcowFile {
fn read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
self.read_cb(
offset,
slice.size() as usize,
|file, read, offset, count| {
let sub_slice = slice.get_slice(read as u64, count as u64).unwrap();
match file {
Some(f) => f.read_exact_at_volatile(sub_slice, offset),
None => {
sub_slice.write_bytes(0);
Ok(())
}
self.read_cb(offset, slice.size(), |file, read, offset, count| {
let sub_slice = slice.get_slice(read, count).unwrap();
match file {
Some(f) => f.read_exact_at_volatile(sub_slice, offset),
None => {
sub_slice.write_bytes(0);
Ok(())
}
},
)
}
})
}
fn write_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
self.write_cb(offset, slice.size() as usize, |file, offset, count| {
let sub_slice = slice.get_slice(offset as u64, count as u64).unwrap();
self.write_cb(offset, slice.size(), |file, offset, count| {
let sub_slice = slice.get_slice(offset, count).unwrap();
file.write_all_volatile(sub_slice)
})
}


@ -6,7 +6,7 @@ use std::fs::File;
use std::io::{self, BufWriter, Read, Seek, SeekFrom, Write};
use std::mem::size_of;
use data_model::VolatileMemory;
use data_model::VolatileSlice;
use sys_util::{FileReadWriteAtVolatile, WriteZeroes};
/// A qcow file. Allows reading/writing clusters and appending clusters.
@ -149,10 +149,13 @@ impl QcowRawFile {
/// Writes
pub fn write_cluster(&mut self, address: u64, mut initial_data: Vec<u8>) -> io::Result<()> {
let raw_slice = initial_data.as_mut_slice();
let volatile_slice = raw_slice
.get_slice(0, self.cluster_size)
.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{:?}", e)))?;
if (initial_data.len() as u64) < self.cluster_size {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"`initial_data` is too small",
));
}
let volatile_slice = VolatileSlice::new(&mut initial_data[..self.cluster_size as usize]);
self.file.write_all_at_volatile(volatile_slice, address)
}
}


@ -461,7 +461,6 @@ impl AsRawFd for Buffer {
#[cfg(test)]
mod tests {
use super::*;
use data_model::VolatileMemory;
use std::fmt::Write;
#[test]


@ -13,7 +13,7 @@ fn main() {
row[x] = b | (g << 8);
}
mem.as_volatile_slice()
.offset((1280 * 4 * y) as u64)
.offset(1280 * 4 * y)
.unwrap()
.copy_from(&row);
}


@ -18,7 +18,6 @@ struct Buffer {
width: u32,
height: u32,
bytes_per_pixel: u32,
bytes_total: u64,
bytes: Vec<u8>,
}
@ -28,7 +27,7 @@ impl Drop for Buffer {
impl Buffer {
fn as_volatile_slice(&mut self) -> VolatileSlice {
unsafe { VolatileSlice::new(self.bytes.as_mut_ptr(), self.bytes_total) }
VolatileSlice::new(self.bytes.as_mut_slice())
}
fn stride(&self) -> usize {
@ -66,7 +65,6 @@ impl Surface {
width: self.width,
height: self.height,
bytes_per_pixel,
bytes_total,
bytes: vec![0; bytes_total as usize],
});
}


@ -255,10 +255,7 @@ impl DisplayT for DisplayWl {
let buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
let framebuffer = surface
.buffer_mem
.get_slice(
(buffer_index * surface.buffer_size) as u64,
surface.buffer_size as u64,
)
.get_slice(buffer_index * surface.buffer_size, surface.buffer_size)
.ok()?;
Some(GpuDisplayFramebuffer::new(
framebuffer,


@ -107,7 +107,7 @@ impl<'a> GpuDisplayFramebuffer<'a> {
.checked_add(width_bytes)?;
let slice = self
.framebuffer
.sub_slice(byte_offset as u64, count as u64)
.sub_slice(byte_offset as usize, count as usize)
.unwrap();
Some(GpuDisplayFramebuffer { slice, ..*self })


@ -23,7 +23,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use libc::close;
use data_model::{VolatileMemory, VolatileSlice};
use data_model::VolatileSlice;
use sys_util::{debug, GuestAddress, GuestMemory};
use crate::generated::p_defines::{
@ -411,7 +411,7 @@ impl Renderer {
{
if vecs
.iter()
.any(|&(addr, len)| mem.get_slice(addr.offset(), len as u64).is_err())
.any(|&(addr, len)| mem.get_slice_at_addr(addr, len).is_err())
{
return Err(Error::InvalidIovec);
}
@ -419,9 +419,9 @@ impl Renderer {
let mut iovecs = Vec::new();
for &(addr, len) in vecs {
// Unwrap will not panic because we already checked the slices.
let slice = mem.get_slice(addr.offset(), len as u64).unwrap();
let slice = mem.get_slice_at_addr(addr, len).unwrap();
iovecs.push(VirglVec {
base: slice.as_ptr() as *mut c_void,
base: slice.as_mut_ptr() as *mut c_void,
len,
});
}
@ -591,7 +591,7 @@ impl Resource {
) -> Result<()> {
if iovecs
.iter()
.any(|&(addr, len)| mem.get_slice(addr.offset(), len as u64).is_err())
.any(|&(addr, len)| mem.get_slice_at_addr(addr, len).is_err())
{
return Err(Error::InvalidIovec);
}
@ -599,9 +599,9 @@ impl Resource {
self.backing_mem = Some(mem.clone());
for &(addr, len) in iovecs {
// Unwrap will not panic because we already checked the slices.
let slice = mem.get_slice(addr.offset(), len as u64).unwrap();
let slice = mem.get_slice_at_addr(addr, len).unwrap();
self.backing_iovecs.push(VirglVec {
base: slice.as_ptr() as *mut c_void,
base: slice.as_mut_ptr() as *mut c_void,
len,
});
}


@ -98,7 +98,7 @@ pub trait FileReadWriteVolatile {
}
// Will panic if read_volatile read more bytes than we gave it, which would be worthy of
// a panic.
slice = slice.offset(bytes_read as u64).unwrap();
slice = slice.offset(bytes_read).unwrap();
}
Ok(())
}
@ -129,7 +129,7 @@ pub trait FileReadWriteVolatile {
}
// Will panic if read_volatile read more bytes than we gave it, which would be worthy of
// a panic.
slice = slice.offset(bytes_written as u64).unwrap();
slice = slice.offset(bytes_written).unwrap();
}
Ok(())
}
@ -187,7 +187,7 @@ pub trait FileReadWriteAtVolatile {
match self.read_at_volatile(slice, offset) {
Ok(0) => return Err(Error::from(ErrorKind::UnexpectedEof)),
Ok(n) => {
slice = slice.offset(n as u64).unwrap();
slice = slice.offset(n).unwrap();
offset = offset.checked_add(n as u64).unwrap();
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
@ -221,7 +221,7 @@ pub trait FileReadWriteAtVolatile {
match self.write_at_volatile(slice, offset) {
Ok(0) => return Err(Error::from(ErrorKind::WriteZero)),
Ok(n) => {
slice = slice.offset(n as u64).unwrap();
slice = slice.offset(n).unwrap();
offset = offset.checked_add(n as u64).unwrap();
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
@ -282,7 +282,7 @@ macro_rules! volatile_impl {
let ret = unsafe {
$crate::file_traits::lib::read(
self.as_raw_fd(),
slice.as_ptr() as *mut std::ffi::c_void,
slice.as_mut_ptr() as *mut std::ffi::c_void,
slice.size() as usize,
)
};
@ -297,13 +297,7 @@ macro_rules! volatile_impl {
&mut self,
bufs: &[$crate::file_traits::lib::VolatileSlice],
) -> std::io::Result<usize> {
let iovecs: Vec<$crate::file_traits::lib::iovec> = bufs
.iter()
.map(|s| $crate::file_traits::lib::iovec {
iov_base: s.as_ptr() as *mut std::ffi::c_void,
iov_len: s.size() as $crate::file_traits::lib::size_t,
})
.collect();
let iovecs = $crate::file_traits::lib::VolatileSlice::as_iovecs(bufs);
if iovecs.is_empty() {
return Ok(0);
@ -314,7 +308,7 @@ macro_rules! volatile_impl {
let ret = unsafe {
$crate::file_traits::lib::readv(
self.as_raw_fd(),
&iovecs[0],
iovecs.as_ptr(),
iovecs.len() as std::os::raw::c_int,
)
};
@ -349,13 +343,7 @@ macro_rules! volatile_impl {
&mut self,
bufs: &[$crate::file_traits::lib::VolatileSlice],
) -> std::io::Result<usize> {
let iovecs: Vec<$crate::file_traits::lib::iovec> = bufs
.iter()
.map(|s| $crate::file_traits::lib::iovec {
iov_base: s.as_ptr() as *mut std::ffi::c_void,
iov_len: s.size() as $crate::file_traits::lib::size_t,
})
.collect();
let iovecs = $crate::file_traits::lib::VolatileSlice::as_iovecs(bufs);
if iovecs.is_empty() {
return Ok(0);
@ -366,7 +354,7 @@ macro_rules! volatile_impl {
let ret = unsafe {
$crate::file_traits::lib::writev(
self.as_raw_fd(),
&iovecs[0],
iovecs.as_ptr(),
iovecs.len() as std::os::raw::c_int,
)
};
@ -394,7 +382,7 @@ macro_rules! volatile_at_impl {
let ret = unsafe {
$crate::file_traits::lib::pread64(
self.as_raw_fd(),
slice.as_ptr() as *mut std::ffi::c_void,
slice.as_mut_ptr() as *mut std::ffi::c_void,
slice.size() as usize,
offset as $crate::file_traits::lib::off64_t,
)
@ -412,13 +400,7 @@ macro_rules! volatile_at_impl {
bufs: &[$crate::file_traits::lib::VolatileSlice],
offset: u64,
) -> std::io::Result<usize> {
let iovecs: Vec<$crate::file_traits::lib::iovec> = bufs
.iter()
.map(|s| $crate::file_traits::lib::iovec {
iov_base: s.as_ptr() as *mut std::ffi::c_void,
iov_len: s.size() as $crate::file_traits::lib::size_t,
})
.collect();
let iovecs = $crate::file_traits::lib::VolatileSlice::as_iovecs(bufs);
if iovecs.is_empty() {
return Ok(0);
@ -429,7 +411,7 @@ macro_rules! volatile_at_impl {
let ret = unsafe {
$crate::file_traits::lib::preadv64(
self.as_raw_fd(),
&iovecs[0],
iovecs.as_ptr(),
iovecs.len() as std::os::raw::c_int,
offset as $crate::file_traits::lib::off64_t,
)
@ -469,13 +451,7 @@ macro_rules! volatile_at_impl {
bufs: &[$crate::file_traits::lib::VolatileSlice],
offset: u64,
) -> std::io::Result<usize> {
let iovecs: Vec<$crate::file_traits::lib::iovec> = bufs
.iter()
.map(|s| $crate::file_traits::lib::iovec {
iov_base: s.as_ptr() as *mut std::ffi::c_void,
iov_len: s.size() as $crate::file_traits::lib::size_t,
})
.collect();
let iovecs = $crate::file_traits::lib::VolatileSlice::as_iovecs(bufs);
if iovecs.is_empty() {
return Ok(0);
@ -486,7 +462,7 @@ macro_rules! volatile_at_impl {
let ret = unsafe {
$crate::file_traits::lib::pwritev64(
self.as_raw_fd(),
&iovecs[0],
iovecs.as_ptr(),
iovecs.len() as std::os::raw::c_int,
offset as $crate::file_traits::lib::off64_t,
)


@ -7,6 +7,7 @@
use std::convert::AsRef;
use std::convert::TryFrom;
use std::fmt::{self, Display};
use std::mem::size_of;
use std::os::unix::io::{AsRawFd, RawFd};
use std::result;
use std::sync::Arc;
@ -87,11 +88,19 @@ struct MemoryRegion {
memfd_offset: u64,
}
fn region_end(region: &MemoryRegion) -> GuestAddress {
// unchecked_add is safe as the region bounds were checked when it was created.
region
.guest_base
.unchecked_add(region.mapping.size() as u64)
impl MemoryRegion {
fn start(&self) -> GuestAddress {
self.guest_base
}
fn end(&self) -> GuestAddress {
// unchecked_add is safe as the region bounds were checked when it was created.
self.guest_base.unchecked_add(self.mapping.size() as u64)
}
fn contains(&self, addr: GuestAddress) -> bool {
addr >= self.guest_base && addr < self.end()
}
}
/// Tracks a memory region and where it is mapped in the guest, along with a shm
@ -200,8 +209,8 @@ impl GuestMemory {
pub fn end_addr(&self) -> GuestAddress {
self.regions
.iter()
.max_by_key(|region| region.guest_base)
.map_or(GuestAddress(0), |region| region_end(region))
.max_by_key(|region| region.start())
.map_or(GuestAddress(0), MemoryRegion::end)
}
/// Returns the total size of memory in bytes.
@ -214,9 +223,7 @@ impl GuestMemory {
/// Returns true if the given address is within the memory range available to the guest.
pub fn address_in_range(&self, addr: GuestAddress) -> bool {
self.regions
.iter()
.any(|region| region.guest_base <= addr && addr < region_end(region))
self.regions.iter().any(|region| region.contains(addr))
}
/// Returns true if the given range (start, end) is overlap with the memory range
@ -224,7 +231,7 @@ impl GuestMemory {
pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
self.regions
.iter()
.any(|region| region.guest_base < end && start < region_end(region))
.any(|region| region.start() < end && start < region.end())
}
/// Returns the address plus the offset if it is in range.
@ -267,7 +274,7 @@ impl GuestMemory {
for (index, region) in self.regions.iter().enumerate() {
cb(
index,
region.guest_base,
region.start(),
region.mapping.size(),
region.mapping.as_ptr() as usize,
region.memfd_offset,
@ -442,6 +449,61 @@ impl GuestMemory {
})
}
/// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
/// is not a subset of this `GuestMemory`.
///
/// # Examples
/// * Write `99` to 30 bytes starting at guest address 0x1010.
///
/// ```
/// # use sys_util::{GuestAddress, GuestMemory, GuestMemoryError, MemoryMapping};
/// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
/// let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
/// vslice.write_bytes(99);
/// # Ok(())
/// # }
/// ```
pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
self.regions
.iter()
.find(|region| region.contains(addr))
.ok_or(Error::InvalidGuestAddress(addr))
.and_then(|region| {
// The cast to a usize is safe here because we know that `region.contains(addr)` and
// it's not possible for a memory region to be larger than what fits in a usize.
region
.mapping
.get_slice(addr.offset_from(region.start()) as usize, len)
.map_err(Error::VolatileMemoryAccess)
})
}
/// Returns a `VolatileRef` to an object at `addr`. Returns `Ok` if the object fits, or `Err` if
/// it extends past the end.
///
/// # Examples
/// * Get a &u64 at offset 0x1010.
///
/// ```
/// # use sys_util::{GuestAddress, GuestMemory, GuestMemoryError, MemoryMapping};
/// # fn test_ref_u64() -> Result<(), GuestMemoryError> {
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
/// gm.write_obj_at_addr(47u64, GuestAddress(0x1010))?;
/// let vref = gm.get_ref_at_addr::<u64>(GuestAddress(0x1010))?;
/// assert_eq!(vref.load(), 47u64);
/// # Ok(())
/// # }
/// ```
pub fn get_ref_at_addr<T: DataInit>(&self, addr: GuestAddress) -> Result<VolatileRef<T>> {
let buf = self.get_slice_at_addr(addr, size_of::<T>())?;
// Safe because we know that `buf` is at least `size_of::<T>()` bytes and that the
// returned reference will not outlive this `GuestMemory`.
Ok(unsafe { VolatileRef::new(buf.as_mut_ptr() as *mut T) })
}
/// Reads data from a file descriptor and writes it to guest memory.
///
/// # Arguments
@ -550,15 +612,16 @@ impl GuestMemory {
where
F: FnOnce(&MemoryMapping, usize) -> Result<T>,
{
for region in self.regions.iter() {
if guest_addr >= region.guest_base && guest_addr < region_end(region) {
return cb(
self.regions
.iter()
.find(|region| region.contains(guest_addr))
.ok_or(Error::InvalidGuestAddress(guest_addr))
.and_then(|region| {
cb(
&region.mapping,
guest_addr.offset_from(region.guest_base) as usize,
);
}
}
Err(Error::InvalidGuestAddress(guest_addr))
guest_addr.offset_from(region.start()) as usize,
)
})
}
/// Convert a GuestAddress into an offset within self.memfd.
@ -585,25 +648,11 @@ impl GuestMemory {
/// assert_eq!(offset, 0x3500);
/// ```
pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
for region in self.regions.iter() {
if guest_addr >= region.guest_base && guest_addr < region_end(region) {
return Ok(region.memfd_offset + guest_addr.offset_from(region.guest_base) as u64);
}
}
Err(Error::InvalidGuestAddress(guest_addr))
}
}
impl VolatileMemory for GuestMemory {
fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
for region in self.regions.iter() {
if offset >= region.guest_base.0 && offset < region_end(region).0 {
return region
.mapping
.get_slice(offset - region.guest_base.0, count);
}
}
Err(VolatileMemoryError::OutOfBounds { addr: offset })
self.regions
.iter()
.find(|region| region.contains(guest_addr))
.ok_or(Error::InvalidGuestAddress(guest_addr))
.map(|region| region.memfd_offset + guest_addr.offset_from(region.start()))
}
}
@ -690,8 +739,11 @@ mod tests {
gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
.unwrap();
let num1: u64 = gm.get_ref(0x500).unwrap().load();
let num2: u64 = gm.get_ref(0x1000 + 32).unwrap().load();
let num1: u64 = gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().load();
let num2: u64 = gm
.get_ref_at_addr(GuestAddress(0x1000 + 32))
.unwrap()
.load();
assert_eq!(val1, num1);
assert_eq!(val2, num2);
}
@ -704,8 +756,10 @@ mod tests {
let val1: u64 = 0xaa55aa55aa55aa55;
let val2: u64 = 0x55aa55aa55aa55aa;
gm.get_ref(0x500).unwrap().store(val1);
gm.get_ref(0x1000 + 32).unwrap().store(val2);
gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().store(val1);
gm.get_ref_at_addr(GuestAddress(0x1000 + 32))
.unwrap()
.store(val2);
let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
assert_eq!(val1, num1);


@ -608,15 +608,23 @@ impl MemoryMapping {
}
impl VolatileMemory for MemoryMapping {
fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
let mem_end = calc_offset(offset, count)?;
if mem_end > self.size as u64 {
if mem_end > self.size {
return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
}
let new_addr =
(self.as_ptr() as usize)
.checked_add(offset)
.ok_or(VolatileMemoryError::Overflow {
base: self.as_ptr() as usize,
offset,
})?;
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::new((self.addr as usize + offset as usize) as *mut _, count) })
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
}
}
@ -888,11 +896,11 @@ mod tests {
#[test]
fn slice_overflow_error() {
let m = MemoryMapping::new(5).unwrap();
let res = m.get_slice(std::u64::MAX, 3).unwrap_err();
let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
assert_eq!(
res,
VolatileMemoryError::Overflow {
base: std::u64::MAX,
base: std::usize::MAX,
offset: 3,
}
);


@ -327,10 +327,7 @@ unsafe impl<'a> IntoIovec for &'a [u8] {
// pointer and size are guaranteed to be accurate.
unsafe impl<'a> IntoIovec for VolatileSlice<'a> {
fn into_iovec(&self) -> Vec<libc::iovec> {
vec![libc::iovec {
iov_base: self.as_ptr() as *const c_void as *mut c_void,
iov_len: self.size() as usize,
}]
vec![self.as_iovec()]
}
}


@ -10,7 +10,6 @@ use std::slice;
use libc::c_char;
use data_model::VolatileMemory;
use devices::{PciAddress, PciInterruptPin};
use sys_util::{GuestAddress, GuestMemory};
@ -140,7 +139,7 @@ pub fn setup_mptable(
return Err(Error::AddressOverflow);
}
mem.get_slice(base_mp.0, mp_size as u64)
mem.get_slice_at_addr(base_mp, mp_size)
.map_err(|_| Error::Clear)?
.write_bytes(0);