mirror of https://github.com/google/alioth.git
fix!: use u64 as the type of guest addresses
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
parent 42ee9393bb
commit 548e748a85
30 changed files with 385 additions and 393 deletions
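Why u64: the commit message does not spell it out, but the diff is consistent with a portability argument — usize follows the host's pointer width, while a guest physical address (GPA) is a 64-bit quantity defined by the guest, not the host. A minimal sketch of the failure mode this avoids (illustrative, not code from the tree):

    // On a 32-bit host, usize cannot represent GPAs at or above 4 GiB.
    fn main() {
        // A guest address just above the 4 GiB boundary.
        let gpa: u64 = 0x1_0000_0000;
        // `gpa as usize` on a 32-bit host silently truncates to 0;
        // u32 stands in here for a 32-bit usize.
        let truncated = gpa as u32 as u64;
        assert_eq!(truncated, 0);
        // Keeping guest addresses as u64 end to end avoids this class of bug.
        println!("gpa = {gpa:#x}, truncated = {truncated:#x}");
    }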
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub const MMIO_32_START: usize = 0x1000_0000; // 256 MiB
-pub const MMIO_32_END: usize = 0x3000_0000; // 768 MiB, size = 512 MiB
-pub const PCIE_CONFIG_START: usize = 0x3000_0000; // 768 MiB
-pub const MEM_64_START: usize = 0x1_0000_0000; // 4GiB
-pub const PAGE_SIZE: usize = 0x1000; // 4KiB
+pub const MMIO_32_START: u64 = 0x1000_0000; // 256 MiB
+pub const MMIO_32_END: u64 = 0x3000_0000; // 768 MiB, size = 512 MiB
+pub const PCIE_CONFIG_START: u64 = 0x3000_0000; // 768 MiB
+pub const MEM_64_START: u64 = 0x1_0000_0000; // 4GiB
+pub const PAGE_SIZE: u64 = 0x1000; // 4KiB
@@ -12,42 +12,42 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub const REAL_MODE_IVT_START: usize = 0x0;
+pub const REAL_MODE_IVT_START: u64 = 0x0;

-pub const BIOS_DATA_AREA_START: usize = 0x400;
-pub const BIOS_DATA_END: usize = 0x500;
+pub const BIOS_DATA_AREA_START: u64 = 0x400;
+pub const BIOS_DATA_END: u64 = 0x500;

-pub const BOOT_GDT_START: usize = 0x500;
-pub const BOOT_GDT_LIMIT: usize = 0x100;
-pub const BOOT_IDT_START: usize = 0x600;
-pub const BOOT_IDT_LIMIT: usize = 0xa00;
+pub const BOOT_GDT_START: u64 = 0x500;
+pub const BOOT_GDT_LIMIT: u64 = 0x100;
+pub const BOOT_IDT_START: u64 = 0x600;
+pub const BOOT_IDT_LIMIT: u64 = 0xa00;

-pub const LINUX_BOOT_PARAMS_START: usize = 0x1000; // size: 4KiB
-pub const HVM_START_INFO_START: usize = 0x1000; // size: 4KiB
+pub const LINUX_BOOT_PARAMS_START: u64 = 0x1000; // size: 4KiB
+pub const HVM_START_INFO_START: u64 = 0x1000; // size: 4KiB

-pub const KERNEL_CMD_LINE_START: usize = 0x2000;
-pub const KERNEL_CMD_LINE_LIMIT: usize = 0x1000;
+pub const KERNEL_CMD_LINE_START: u64 = 0x2000;
+pub const KERNEL_CMD_LINE_LIMIT: u64 = 0x1000;

-pub const BOOT_PAGING_START: usize = 0x3000;
-pub const BOOT_PAGING_LIMIT: usize = 0x4000;
+pub const BOOT_PAGING_START: u64 = 0x3000;
+pub const BOOT_PAGING_LIMIT: u64 = 0x4000;

-pub const EBDA_START: usize = 0x8_0000;
-pub const EBDA_END: usize = 0xA_0000;
+pub const EBDA_START: u64 = 0x8_0000;
+pub const EBDA_END: u64 = 0xA_0000;

-pub const KERNEL_IMAGE_START: usize = 0x100_0000; // 16 MiB
+pub const KERNEL_IMAGE_START: u64 = 0x100_0000; // 16 MiB

-pub const RAM_32_END: usize = 0x8000_0000; // 2 GiB
-pub const RAM_32_SIZE: usize = RAM_32_END; // 2 GiB
+pub const RAM_32_END: u64 = 0x8000_0000; // 2 GiB
+pub const RAM_32_SIZE: u64 = RAM_32_END; // 2 GiB

-pub const MMIO_32_START: usize = 0x8000_0000; // 2 GiB
-pub const MMIO_32_END: usize = 0xe000_0000; // 3.5 GiB
+pub const MMIO_32_START: u64 = 0x8000_0000; // 2 GiB
+pub const MMIO_32_END: u64 = 0xe000_0000; // 3.5 GiB

-pub const PCIE_CONFIG_START: usize = 0xe000_0000; // 3.5 GiB
-pub const PCIE_CONFIG_END: usize = 0xf000_0000; // 3.75 GiB, size = 256 MiB
+pub const PCIE_CONFIG_START: u64 = 0xe000_0000; // 3.5 GiB
+pub const PCIE_CONFIG_END: u64 = 0xf000_0000; // 3.75 GiB, size = 256 MiB

-pub const IOAPIC_START: usize = 0xfec0_0000;
-pub const APIC_START: usize = 0xfee0_0000;
+pub const IOAPIC_START: u64 = 0xfec0_0000;
+pub const APIC_START: u64 = 0xfee0_0000;

-pub const MEM_64_START: usize = 0x1_0000_0000; // 4GiB
+pub const MEM_64_START: u64 = 0x1_0000_0000; // 4GiB

-pub const PAGE_SIZE: usize = 0x1000; // 4KiB
+pub const PAGE_SIZE: u64 = 0x1000; // 4KiB
@@ -77,7 +77,7 @@ pub const STATE_SHUTDOWN: u8 = 2;
 pub const STATE_REBOOT_PENDING: u8 = 3;

 pub struct BoardConfig {
-    pub mem_size: usize,
+    pub mem_size: u64,
     pub num_cpu: u32,
     pub coco: Option<Coco>,
 }
@@ -118,7 +118,7 @@ where
             }
             _ => unimplemented!(),
         };
-        let range_ref = ram.get_slice::<u8>(desc.base as usize, desc.len as usize)?;
+        let range_ref = ram.get_slice::<u8>(desc.base as u64, desc.len as u64)?;
         let range_bytes =
             unsafe { std::slice::from_raw_parts_mut(range_ref.as_ptr() as _, range_ref.len()) };
         ram_bus.mark_private_memory(desc.base as _, desc.len as _, true)?;
@@ -163,7 +163,7 @@ where
             let offset = desc_offset + i * size_of::<SevMetadataDesc>();
             self.update_snp_desc(offset, fw_range)?;
         }
-        let fw_gpa = (MEM_64_START - fw_range.len()) as u64;
+        let fw_gpa = MEM_64_START - fw_range.len() as u64;
         ram_bus.mark_private_memory(fw_gpa, fw_range.len() as _, true)?;
         self.vm
             .snp_launch_update(fw_range, fw_gpa, SnpPageType::Normal)
@@ -227,7 +227,7 @@ where
         let ram_bus = memory.ram_bus();

         let low_mem_size = std::cmp::min(config.mem_size, RAM_32_SIZE);
-        let pages_low = ArcMemPages::from_memfd(low_mem_size, None, Some(c"ram-low"))?;
+        let pages_low = ArcMemPages::from_memfd(low_mem_size as usize, None, Some(c"ram-low"))?;
         let region_low = MemRegion {
             size: low_mem_size,
             ranges: vec![MemRange::Mapped(pages_low.clone())],
@@ -260,7 +260,7 @@ where
         }
         if config.mem_size > RAM_32_SIZE {
             let mem_hi_size = config.mem_size - RAM_32_SIZE;
-            let mem_hi = ArcMemPages::from_memfd(mem_hi_size, None, Some(c"ram-high"))?;
+            let mem_hi = ArcMemPages::from_memfd(mem_hi_size as usize, None, Some(c"ram-high"))?;
             let region_hi = MemRegion::with_mapped(mem_hi.clone(), MemRegionType::Ram);
             memory.add_region(AddrOpt::Fixed(MEM_64_START), Arc::new(region_hi))?;
             if let Some(coco) = &self.config.coco {
@@ -312,15 +312,15 @@ where
         let mut acpi_table = create_acpi(self.config.num_cpu);
         if self.config.coco.is_none() {
             let ram = self.memory.ram_bus();
-            acpi_table.relocate((EBDA_START + size_of::<AcpiTableRsdp>()) as u64);
+            acpi_table.relocate(EBDA_START + size_of::<AcpiTableRsdp>() as u64);
             ram.write_range(
                 EBDA_START,
-                size_of::<AcpiTableRsdp>(),
+                size_of::<AcpiTableRsdp>() as u64,
                 acpi_table.rsdp().as_bytes(),
             )?;
             ram.write_range(
-                EBDA_START + size_of::<AcpiTableRsdp>(),
-                acpi_table.tables().len(),
+                EBDA_START + size_of::<AcpiTableRsdp>() as u64,
+                acpi_table.tables().len() as u64,
                 acpi_table.tables(),
             )?;
         }
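Note on the fw_gpa change above: `as` binds tighter than binary `-`, so the new expression casts only the usize-valued `fw_range.len()` and performs the subtraction in u64, whereas the old form subtracted in usize and cast the result. A small sketch (MEM_64_START matches the layout above; the firmware size is hypothetical):

    fn main() {
        const MEM_64_START: u64 = 0x1_0000_0000; // 4 GiB
        let fw_len: usize = 0x40_0000; // hypothetical 4 MiB firmware image
        // Parses as MEM_64_START - (fw_len as u64): cast first, subtract in u64.
        let fw_gpa = MEM_64_START - fw_len as u64;
        assert_eq!(fw_gpa, 0xFFC0_0000);
        println!("firmware loads at {fw_gpa:#x}");
    }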
@@ -106,7 +106,7 @@ pub enum FwCfgContent {

 struct FwCfgContentAccess<'a> {
     content: &'a FwCfgContent,
-    offset: usize,
+    offset: u32,
 }

 impl<'a> Read for FwCfgContentAccess<'a> {
@@ -116,15 +116,15 @@ impl<'a> Read for FwCfgContentAccess<'a> {
                 Seek::seek(&mut (&*f), SeekFrom::Start(offset + self.offset as u64))?;
                 Read::read(&mut (&*f), buf)
             }
-            FwCfgContent::Bytes(b) => match b.get(self.offset..) {
+            FwCfgContent::Bytes(b) => match b.get(self.offset as usize..) {
                 Some(mut s) => s.read(buf),
                 None => Err(ErrorKind::UnexpectedEof)?,
             },
-            FwCfgContent::Slice(b) => match b.get(self.offset..) {
+            FwCfgContent::Slice(b) => match b.get(self.offset as usize..) {
                 Some(mut s) => s.read(buf),
                 None => Err(ErrorKind::UnexpectedEof)?,
             },
-            FwCfgContent::U32(n) => match n.to_le_bytes().get(self.offset..) {
+            FwCfgContent::U32(n) => match n.to_le_bytes().get(self.offset as usize..) {
                 Some(mut s) => s.read(buf),
                 None => Err(ErrorKind::UnexpectedEof)?,
             },
@@ -139,17 +139,17 @@ impl Default for FwCfgContent {
 }

 impl FwCfgContent {
-    fn size(&self) -> Result<usize> {
+    fn size(&self) -> Result<u32> {
         let ret = match self {
             FwCfgContent::Bytes(v) => v.len(),
             FwCfgContent::File(offset, f) => (f.metadata()?.len() - offset) as usize,
             FwCfgContent::Slice(s) => s.len(),
             FwCfgContent::U32(n) => size_of_val(n),
         };
-        Ok(ret)
+        u32::try_from(ret).map_err(|_| std::io::ErrorKind::InvalidInput.into())
     }

-    fn access(&self, offset: usize) -> FwCfgContentAccess {
+    fn access(&self, offset: u32) -> FwCfgContentAccess {
         FwCfgContentAccess {
             content: self,
             offset,
@@ -167,7 +167,7 @@ pub struct FwCfgItem {
 #[derive(Debug)]
 pub struct FwCfg {
     selector: u16,
-    data_offset: usize,
+    data_offset: u32,
     dma_address: u64,
     items: Vec<FwCfgItem>,                           // 0x20 and above
     known_items: [FwCfgContent; FW_CFG_KNOWN_ITEMS], // 0x0 to 0x19
@@ -246,7 +246,7 @@ impl FwCfg {
     }

     #[cfg(target_arch = "x86_64")]
-    pub(crate) fn add_e820(&mut self, mem_regions: &[(usize, MemRegionEntry)]) -> Result<()> {
+    pub(crate) fn add_e820(&mut self, mem_regions: &[(u64, MemRegionEntry)]) -> Result<()> {
         let mut bytes = vec![];
         for (addr, region) in mem_regions.iter() {
             let type_ = match region.type_ {
@@ -257,8 +257,8 @@ impl FwCfg {
                 MemRegionType::Hidden => continue,
             };
             let entry = BootE820Entry {
-                addr: *addr as u64,
-                size: region.size as u64,
+                addr: *addr,
+                size: region.size,
                 type_,
             };
             bytes.extend_from_slice(entry.as_bytes());
@@ -314,14 +314,8 @@ impl FwCfg {
         let index = self.items.len();
         let c_name = create_file_name(&item.name);
         let size = item.content.size()?;
-        let item_size = if size > u32::MAX as usize {
-            // TODO use FileTooLarge
-            return Err(ErrorKind::Unsupported.into());
-        } else {
-            size as u32
-        };
         let cfg_file = FwCfgFile {
-            size_be: item_size.to_be(),
+            size_be: size.to_be(),
             select_be: (FW_CFG_FILE_FIRST + index as u16).to_be(),
             _reserved: 0,
             name: c_name,
@@ -336,15 +330,15 @@ impl FwCfg {
     fn dma_read_content(
         &self,
         content: &FwCfgContent,
-        offset: usize,
-        len: usize,
-        address: usize,
-    ) -> Result<usize> {
+        offset: u32,
+        len: u32,
+        address: u64,
+    ) -> Result<u32> {
         let content_size = content.size()?.saturating_sub(offset);
         let op_size = std::cmp::min(content_size, len);
         let r = self
             .memory
-            .write_range(address, op_size, content.access(offset));
+            .write_range(address, op_size as u64, content.access(offset));
         match r {
             Err(e) => {
                 log::error!("fw_cfg: dam read error: {e:x?}");
@@ -354,7 +348,7 @@ impl FwCfg {
         }
     }

-    fn dma_read(&mut self, selector: u16, len: usize, address: usize) -> Result<()> {
+    fn dma_read(&mut self, selector: u16, len: u32, address: u64) -> Result<()> {
         let op_size = if let Some(content) = self.known_items.get(selector as usize) {
             self.dma_read_content(content, self.data_offset, len, address)
         } else if let Some(item) = self.items.get((selector - FW_CFG_FILE_FIRST) as usize) {
@@ -363,16 +357,16 @@ impl FwCfg {
             log::error!("fw_cfg: selector {selector:#x} does not exist.");
             Err(ErrorKind::NotFound.into())
         }?;
-        self.data_offset += op_size as usize;
+        self.data_offset += op_size;
         Ok(())
     }

-    fn dma_write(&self, _selector: u16, _len: usize, _address: usize) -> Result<()> {
+    fn dma_write(&self, _selector: u16, _len: u32, _address: u64) -> Result<()> {
         unimplemented!()
     }

     fn do_dma(&mut self) {
-        let dma_address = self.dma_address as usize;
+        let dma_address = self.dma_address;
         let dma_access = match self.memory.read::<FwCfgDmaAccess>(dma_address) {
             Ok(access) => access,
             Err(e) => {
@@ -384,8 +378,8 @@ impl FwCfg {
         if control.select() {
             self.selector = control.select() as u16;
         }
-        let len = u32::from_be(dma_access.length_be) as usize;
-        let addr = u64::from_be(dma_access.address_be) as usize;
+        let len = u32::from_be(dma_access.length_be);
+        let addr = u64::from_be(dma_access.address_be);
         let ret = if control.read() {
             self.dma_read(self.selector, len, addr)
         } else if control.write() {
@@ -402,17 +396,17 @@ impl FwCfg {
             access_resp.set_error(true);
         }
         if let Err(e) = self.memory.write(
-            dma_address + FwCfgDmaAccess::OFFSET_CONTROL_BE,
+            dma_address + FwCfgDmaAccess::OFFSET_CONTROL_BE as u64,
             &access_resp.0.to_be(),
         ) {
             log::error!("fw_cfg: finishing dma: {e:?}")
         }
     }

-    fn read_content(content: &FwCfgContent, offset: usize) -> Option<u8> {
+    fn read_content(content: &FwCfgContent, offset: u32) -> Option<u8> {
         match content {
-            FwCfgContent::Bytes(b) => b.get(offset).copied(),
-            FwCfgContent::Slice(s) => s.get(offset).copied(),
+            FwCfgContent::Bytes(b) => b.get(offset as usize).copied(),
+            FwCfgContent::Slice(s) => s.get(offset as usize).copied(),
             FwCfgContent::File(o, f) => {
                 let mut buf = [0u8];
                 match f.read_exact_at(&mut buf, o + offset as u64) {
@@ -423,7 +417,7 @@ impl FwCfg {
                     }
                 }
             }
-            FwCfgContent::U32(n) => n.to_le_bytes().get(offset).copied(),
+            FwCfgContent::U32(n) => n.to_le_bytes().get(offset as usize).copied(),
         }
     }

@@ -454,11 +448,11 @@ impl FwCfg {
 }

 impl Mmio for Mutex<FwCfg> {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         16
     }

-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
         let mut fw_cfg = self.lock();
         let port = offset as u16 + PORT_SELECTOR;
         let ret = match (port, size) {
@@ -485,7 +479,7 @@ impl Mmio for Mutex<FwCfg> {
         Ok(ret)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
         let mut fw_cfg = self.lock();
         let port = offset as u16 + PORT_SELECTOR;
         match (port, size) {
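Note on `FwCfgContent::size` above: the fw_cfg file entry carries its size as a 32-bit big-endian field on the wire, so returning `Result<u32>` moves the range check into `size()` itself and lets `add_item` drop its ad-hoc `u32::MAX` test. A sketch of the same pattern in isolation (names are illustrative, not from the tree):

    use std::io::{Error, ErrorKind, Result};

    // Validate a host-side usize length once, at the boundary where the
    // protocol demands a u32, instead of casting at every call site.
    fn content_size(len: usize) -> Result<u32> {
        u32::try_from(len).map_err(|_| Error::from(ErrorKind::InvalidInput))
    }

    fn main() -> Result<()> {
        assert_eq!(content_size(16)?, 16);
        // On a 64-bit host, a length above u32::MAX is rejected, not truncated.
        #[cfg(target_pointer_width = "64")]
        assert!(content_size((u32::MAX as usize) + 1).is_err());
        Ok(())
    }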
@@ -36,18 +36,18 @@ const PVPANIC_VENDOR_ID: u16 = 0x1b36;
 const PVPANIC_DEVICE_ID: u16 = 0x0011;

 #[derive(Debug)]
-struct PvPanicBar<const N: usize>;
+struct PvPanicBar<const N: u64>;

-impl<const N: usize> Mmio for PvPanicBar<N> {
-    fn size(&self) -> usize {
+impl<const N: u64> Mmio for PvPanicBar<N> {
+    fn size(&self) -> u64 {
         N
     }

-    fn read(&self, _offset: usize, _size: u8) -> mem::Result<u64> {
+    fn read(&self, _offset: u64, _size: u8) -> mem::Result<u64> {
         Ok(PvPanicByte::all().bits() as u64)
     }

-    fn write(&self, _offset: usize, _size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, _offset: u64, _size: u8, val: u64) -> mem::Result<()> {
         log::info!("pvpanic: {:x?}", PvPanicByte::from_bits_retain(val as u8));
         Err(mem::Error::Action(Action::Shutdown))
     }
@@ -60,7 +60,7 @@ pub struct PvPanic {

 impl PvPanic {
     pub fn new() -> Self {
-        const BAR_SIZE: usize = 0x1000;
+        const BAR_SIZE: u64 = 0x1000;
         let header = DeviceHeader {
             common: CommonHeader {
                 vendor: PVPANIC_VENDOR_ID,
@@ -201,11 +201,11 @@ impl<I> Mmio for Serial<I>
 where
     I: IntxSender + Sync + Send + 'static,
 {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         8
     }

-    fn read(&self, offset: usize, _size: u8) -> Result<u64, mem::Error> {
+    fn read(&self, offset: u64, _size: u8) -> Result<u64, mem::Error> {
         let mut reg = self.reg.lock();
         let ret = match offset as u16 {
             DIVISOR_LATCH_LSB if reg.line_control.divisor_latch_access() => reg.divisor as u8,
@@ -241,7 +241,7 @@ where
         Ok(ret as u64)
     }

-    fn write(&self, offset: usize, _size: u8, val: u64) -> Result<(), mem::Error> {
+    fn write(&self, offset: u64, _size: u8, val: u64) -> Result<(), mem::Error> {
         let byte = val as u8;
         let mut reg = self.reg.lock();
         match offset as u16 {
@@ -50,13 +50,13 @@ pub enum Error {
     GuestMap {
         hva: usize,
         gpa: u64,
-        size: usize,
+        size: u64,
         error: std::io::Error,
     },
     #[snafu(display("Failed to unmap gpa {gpa:#x}, size {size:#x}"))]
     GuestUnmap {
         gpa: u64,
-        size: usize,
+        size: u64,
         error: std::io::Error,
     },
     #[snafu(display("Hypervisor is missing capability: {cap}"))]
@@ -169,13 +169,13 @@ pub trait VmMemory: Debug + Send + Sync + 'static {
     fn mem_map(
         &self,
         slot: u32,
-        gpa: usize,
-        size: usize,
+        gpa: u64,
+        size: u64,
         hva: usize,
         option: MemMapOption,
     ) -> Result<(), Error>;

-    fn unmap(&self, slot: u32, gpa: usize, size: usize) -> Result<(), Error>;
+    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<(), Error>;

     fn max_mem_slots(&self) -> Result<u32, Error>;

@@ -194,7 +194,7 @@ pub trait IoeventFd: Debug + Send + Sync + AsFd + 'static {}
 pub trait IoeventFdRegistry: Debug + Send + Sync + 'static {
     type IoeventFd: IoeventFd;
     fn create(&self) -> Result<Self::IoeventFd>;
-    fn register(&self, fd: &Self::IoeventFd, gpa: usize, len: u8, data: Option<u64>) -> Result<()>;
+    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()>;
     #[cfg(target_arch = "x86_64")]
     fn register_port(
         &self,
@@ -289,7 +289,7 @@ pub enum VmExit {
         size: u8,
     },
     Mmio {
-        addr: usize,
+        addr: u64,
         write: Option<u64>,
         size: u8,
     },
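Note the split in the `VmMemory` trait above: `hva` stays `usize` because it is a real pointer on the host, while `gpa` and `size` become `u64` because they live in the guest's 64-bit physical address space. A sketch of that split (an assumed helper, not from the tree):

    // The host virtual address comes from an actual host allocation, so
    // usize is the correct width for it; the GPA is guest-defined.
    fn mem_map_args(host_buf: &mut [u8], gpa: u64) -> (usize, u64, u64) {
        let hva = host_buf.as_mut_ptr() as usize; // host pointer width
        let size = host_buf.len() as u64; // guest-visible size, in u64
        (hva, gpa, size)
    }

    fn main() {
        let mut ram = vec![0u8; 4096];
        let (hva, gpa, size) = mem_map_args(&mut ram, 0x1_0000_0000);
        println!("map hva={hva:#x} -> gpa={gpa:#x}, size={size:#x}");
    }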
@@ -55,8 +55,8 @@ impl VmMemory for HvfMemory {
     fn mem_map(
         &self,
         _slot: u32,
-        _gpa: usize,
-        _size: usize,
+        _gpa: u64,
+        _size: u64,
         _hva: usize,
         _option: MemMapOption,
     ) -> Result<()> {
@@ -67,7 +67,7 @@ impl VmMemory for HvfMemory {
         unimplemented!()
     }

-    fn unmap(&self, _slot: u32, _gpa: usize, _size: usize) -> Result<()> {
+    fn unmap(&self, _slot: u32, _gpa: u64, _size: u64) -> Result<()> {
         unimplemented!()
     }

@@ -156,7 +156,7 @@ impl IoeventFdRegistry for HvfIoeventFdRegistry {
     fn register(
         &self,
         _fd: &Self::IoeventFd,
-        _gpa: usize,
+        _gpa: u64,
         _len: u8,
         _data: Option<u64>,
     ) -> Result<()> {
@@ -150,8 +150,8 @@ impl VmMemory for KvmMemory {
     fn mem_map(
         &self,
         slot: u32,
-        gpa: usize,
-        size: usize,
+        gpa: u64,
+        size: u64,
         hva: usize,
         option: MemMapOption,
     ) -> Result<(), Error> {
@@ -174,7 +174,7 @@ impl VmMemory for KvmMemory {
                 userspace_addr: hva as _,
                 flags,
                 guest_memfd: memfd.as_raw_fd() as _,
-                guest_memfd_offset: gpa as u64,
+                guest_memfd_offset: gpa,
                 ..Default::default()
             };
             unsafe { kvm_set_user_memory_region2(&self.vm, &region) }
@@ -188,27 +188,21 @@ impl VmMemory for KvmMemory {
             };
             unsafe { kvm_set_user_memory_region(&self.vm, &region) }
         }
-        .context(error::GuestMap {
-            hva,
-            gpa: gpa as u64,
-            size,
-        })?;
+        .context(error::GuestMap { hva, gpa, size })?;
         Ok(())
     }

-    fn unmap(&self, slot: u32, gpa: usize, size: usize) -> Result<(), Error> {
+    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<(), Error> {
         let flags = KvmMemFlag::empty();
         let region = KvmUserspaceMemoryRegion {
             slot,
-            guest_phys_addr: gpa as _,
+            guest_phys_addr: gpa,
             memory_size: 0,
             userspace_addr: 0,
             flags,
         };
-        unsafe { kvm_set_user_memory_region(&self.vm, &region) }.context(error::GuestUnmap {
-            gpa: gpa as u64,
-            size,
-        })?;
+        unsafe { kvm_set_user_memory_region(&self.vm, &region) }
+            .context(error::GuestUnmap { gpa, size })?;
         Ok(())
     }

@@ -513,9 +507,9 @@ impl IoeventFdRegistry for KvmIoeventFdRegistry {
         })
     }

-    fn register(&self, fd: &Self::IoeventFd, gpa: usize, len: u8, data: Option<u64>) -> Result<()> {
+    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()> {
         let mut request = KvmIoEventFd {
-            addr: gpa as u64,
+            addr: gpa,
             len: len as u32,
             fd: fd.as_fd().as_raw_fd(),
             ..Default::default()
@@ -22,7 +22,7 @@ impl KvmVcpu {
     pub(super) fn handle_mmio(&mut self) -> Result<VmExit, Error> {
         let kvm_mmio = unsafe { &self.kvm_run.exit.mmio };
         let exit = VmExit::Mmio {
-            addr: kvm_mmio.phys_addr as usize,
+            addr: kvm_mmio.phys_addr,
             write: if kvm_mmio.is_write > 0 {
                 Some(u64::from_ne_bytes(kvm_mmio.data))
             } else {
@@ -21,15 +21,15 @@ impl crate::hv::VmMemory for FakeVmMemory {
     fn mem_map(
         &self,
         _slot: u32,
-        _gpa: usize,
-        _size: usize,
+        _gpa: u64,
+        _size: u64,
         _hva: usize,
         _option: MemMapOption,
     ) -> Result<()> {
         Ok(())
     }

-    fn unmap(&self, _slot: u32, _gpa: usize, _size: usize) -> Result<()> {
+    fn unmap(&self, _slot: u32, _gpa: u64, _size: u64) -> Result<()> {
         Ok(())
     }

@@ -25,10 +25,10 @@ use crate::mem::{AddrOpt, MemRegion, MemRegionType, Memory};

 pub fn load<P: AsRef<Path>>(memory: &Memory, path: P) -> Result<(InitState, ArcMemPages)> {
     let mut file = File::open(path)?;
-    let size = file.metadata()?.len() as usize;
+    let size = file.metadata()?.len();
     assert_eq!(size & 0xfff, 0);

-    let mut rom = ArcMemPages::from_memfd(size, None, Some(c"rom"))?;
+    let mut rom = ArcMemPages::from_memfd(size as usize, None, Some(c"rom"))?;
     file.read_exact(rom.as_slice_mut())?;

     let gpa = MEM_64_START - size;
@@ -20,7 +20,7 @@ use crate::mem::MemRegionEntry;

 pub fn load<P: AsRef<Path>>(
     _memory: &RamBus,
-    _mem_regions: &[(usize, MemRegionEntry)],
+    _mem_regions: &[(u64, MemRegionEntry)],
     _kernel: P,
     _cmd_line: Option<&str>,
     _initramfs: Option<P>,
@@ -42,7 +42,7 @@ const MINIMAL_VERSION: u16 = 0x020c;

 pub fn load<P: AsRef<Path>>(
     memory: &RamBus,
-    mem_regions: &[(usize, MemRegionEntry)],
+    mem_regions: &[(u64, MemRegionEntry)],
     kernel: P,
     cmd_line: Option<&str>,
     initramfs: Option<P>,
@@ -92,11 +92,15 @@ pub fn load<P: AsRef<Path>>(
     // load cmd line
     if let Some(cmd_line) = cmd_line {
         let cmd_line_limit =
-            std::cmp::min(boot_params.hdr.cmdline_size as usize, KERNEL_CMD_LINE_LIMIT);
-        if cmd_line.len() > cmd_line_limit {
+            std::cmp::min(boot_params.hdr.cmdline_size as u64, KERNEL_CMD_LINE_LIMIT);
+        if cmd_line.len() as u64 > cmd_line_limit {
             return Err(Error::CmdLineTooLong(cmd_line.len(), cmd_line_limit));
         }
-        memory.write_range(KERNEL_CMD_LINE_START, cmd_line.len(), cmd_line.as_bytes())?;
+        memory.write_range(
+            KERNEL_CMD_LINE_START,
+            cmd_line.len() as u64,
+            cmd_line.as_bytes(),
+        )?;
         boot_params.hdr.cmd_line_ptr = KERNEL_CMD_LINE_START as u32;
         boot_params.ext_cmd_line_ptr = (KERNEL_CMD_LINE_START >> 32) as u32;
     }
@@ -104,18 +108,18 @@ pub fn load<P: AsRef<Path>>(
     // load kernel image
     let kernel_offset = (boot_params.hdr.setup_sects as u64 + 1) * 512;
     kernel.seek(SeekFrom::Start(kernel_offset))?;
-    let kernel_size = (kernel_meta.len() - kernel_offset) as usize;
+    let kernel_size = kernel_meta.len() - kernel_offset;
     memory.write_range(KERNEL_IMAGE_START, kernel_size, kernel)?;

     // load initramfs
     let initramfs_range;
     if let Some(initramfs) = initramfs {
         let initramfs = File::open(initramfs)?;
-        let initramfs_size = initramfs.metadata()?.len() as usize;
+        let initramfs_size = initramfs.metadata()?.len();
         let initramfs_gpa = search_initramfs_address(
             mem_regions,
             initramfs_size,
-            boot_params.hdr.initrd_addr_max as usize,
+            boot_params.hdr.initrd_addr_max as u64,
         )?;
         let initramfs_end = initramfs_gpa + initramfs_size;
         memory.write_range(initramfs_gpa, initramfs_size, initramfs)?;
@@ -144,36 +148,36 @@ pub fn load<P: AsRef<Path>>(
             MemRegionType::Hidden => continue,
         };
         boot_params.e820_table[region_index] = BootE820Entry {
-            addr: *addr as u64,
-            size: region.size as u64,
+            addr: *addr,
+            size: region.size,
             type_,
         };
         region_index += 1;
     }
     boot_params.e820_entries = mem_regions.len() as u8;

-    boot_params.acpi_rsdp_addr = EBDA_START as u64;
+    boot_params.acpi_rsdp_addr = EBDA_START;

     memory.write(LINUX_BOOT_PARAMS_START, &boot_params)?;

     // set up identity paging
     let pml4_start = BOOT_PAGING_START;
     let pdpt_start = pml4_start + 0x1000;
-    let pml4e = (Entry::P | Entry::RW).bits() as u64 | pdpt_start as u64;
+    let pml4e = (Entry::P | Entry::RW).bits() as u64 | pdpt_start;
     memory.write(pml4_start, &pml4e)?;
-    let alignment = boot_params.hdr.kernel_alignment as usize;
+    let alignment = boot_params.hdr.kernel_alignment as u64;
     let runtime_start = (KERNEL_IMAGE_START + alignment - 1) & !(alignment - 1);
     let max_addr = std::cmp::max(
-        runtime_start + boot_params.hdr.init_size as usize,
+        runtime_start + boot_params.hdr.init_size as u64,
         std::cmp::max(
-            LINUX_BOOT_PARAMS_START + size_of::<BootParams>(),
+            LINUX_BOOT_PARAMS_START + size_of::<BootParams>() as u64,
             KERNEL_CMD_LINE_START + KERNEL_CMD_LINE_LIMIT,
         ),
     );
     let num_page = (max_addr as u64 + (1 << 30) - 1) >> 30;
     for i in 0..num_page {
         let pdpte = (i << 30) | (Entry::P | Entry::RW | Entry::PS).bits() as u64;
-        memory.write(pdpt_start + i as usize * size_of::<u64>(), &pdpte)?;
+        memory.write(pdpt_start + i * size_of::<u64>() as u64, &pdpte)?;
     }

     // set up gdt
@@ -210,7 +214,7 @@ pub fn load<P: AsRef<Path>>(
         boot_ldtr.to_desc(),
     ];
     let gdtr = DtRegVal {
-        base: BOOT_GDT_START as u64,
+        base: BOOT_GDT_START,
         limit: size_of_val(&gdt) as u16 - 1,
     };
     let idtr = DtRegVal { base: 0, limit: 0 };
@@ -218,14 +222,14 @@ pub fn load<P: AsRef<Path>>(

     Ok(InitState {
         regs: vec![
-            (Reg::Rsi, LINUX_BOOT_PARAMS_START as u64),
-            (Reg::Rip, KERNEL_IMAGE_START as u64 + 0x200),
+            (Reg::Rsi, LINUX_BOOT_PARAMS_START),
+            (Reg::Rip, KERNEL_IMAGE_START + 0x200),
             (Reg::Rflags, Rflags::RESERVED_1.bits() as u64),
         ],
         sregs: vec![
             (SReg::Efer, (Efer::LMA | Efer::LME).bits() as u64),
             (SReg::Cr0, (Cr0::NE | Cr0::PE | Cr0::PG).bits() as u64),
-            (SReg::Cr3, pml4_start as u64),
+            (SReg::Cr3, pml4_start),
             (SReg::Cr4, Cr4::PAE.bits() as u64),
         ],
         seg_regs: vec![
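The loader's `runtime_start` computation above uses the standard power-of-two align-up idiom, now entirely in u64. A worked example (the alignment value is illustrative, not the kernel's real header value):

    // Round addr up to the next multiple of align; align must be a power of two.
    fn align_up(addr: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        (addr + align - 1) & !(align - 1)
    }

    fn main() {
        let kernel_image_start: u64 = 0x100_0000; // 16 MiB, as in the layout above
        // Already a multiple of 2 MiB, so unchanged.
        assert_eq!(align_up(kernel_image_start, 0x20_0000), 0x100_0000);
        // One byte past the boundary rounds up to the next 2 MiB boundary.
        assert_eq!(align_up(kernel_image_start + 1, 0x20_0000), 0x120_0000);
    }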
@@ -56,7 +56,7 @@ pub struct InitState {
     pub dt_regs: Vec<(DtReg, DtRegVal)>,
     #[cfg(target_arch = "x86_64")]
     pub seg_regs: Vec<(SegReg, SegRegVal)>,
-    pub initramfs: Option<Range<usize>>,
+    pub initramfs: Option<Range<u64>>,
 }

 #[derive(Debug, Error)]
@@ -80,7 +80,7 @@ pub enum Error {
     NotRelocatableKernel,

     #[error("kernel command line too long, length: {0}, limit: {1}")]
-    CmdLineTooLong(usize, usize),
+    CmdLineTooLong(usize, u64),

     #[error("cannot load initramfs at {addr:#x} - {max:#x}, initramfs max address: {addr_max:#x}")]
     InitramfsAddrLimit {
@@ -103,10 +103,10 @@ pub enum Error {
 pub type Result<T, E = Error> = std::result::Result<T, E>;

 pub fn search_initramfs_address(
-    entries: &[(usize, MemRegionEntry)],
-    size: usize,
-    addr_max: usize,
-) -> Result<usize, Error> {
+    entries: &[(u64, MemRegionEntry)],
+    size: u64,
+    addr_max: u64,
+) -> Result<u64, Error> {
     for (start, entry) in entries.iter().rev() {
         let region_max = entry.size - 1 + start;
         let limit = std::cmp::min(region_max, addr_max);
@@ -91,7 +91,7 @@ fn search_pvh_note<F: Read + Seek>(
 // https://xenbits.xen.org/docs/4.18-testing/misc/pvh.html
 pub fn load<P: AsRef<Path>>(
     memory: &RamBus,
-    mem_regions: &[(usize, MemRegionEntry)],
+    mem_regions: &[(u64, MemRegionEntry)],
     kernel: P,
     cmd_line: Option<&str>,
     initramfs: Option<P>,
@@ -125,8 +125,8 @@ pub fn load<P: AsRef<Path>>(
         )?;
     }
     if program_header.file_sz > 0 {
-        let addr = program_header.paddr as usize;
-        let size = program_header.file_sz as usize;
+        let addr = program_header.paddr;
+        let size = program_header.file_sz;
         kernel_file.seek(SeekFrom::Start(program_header.offset))?;
         memory.write_range(addr, size, &mut kernel_file)?;
         log::info!("loaded at {:#x?}-{:#x?}", addr, addr + size);
@@ -162,8 +162,8 @@ pub fn load<P: AsRef<Path>>(
         start_info: HvmStartInfo {
             magic: XEN_HVM_START_MAGIC_VALUE,
             version: XEN_HVM_START_INFO_V1,
-            cmdline_paddr: KERNEL_CMD_LINE_START as u64,
-            rsdp_paddr: EBDA_START as u64,
+            cmdline_paddr: KERNEL_CMD_LINE_START,
+            rsdp_paddr: EBDA_START,
             ..Default::default()
         },
         ..Default::default()
@@ -171,26 +171,30 @@ pub fn load<P: AsRef<Path>>(

     // load cmd line
     if let Some(cmd_line) = cmd_line {
-        if cmd_line.len() > KERNEL_CMD_LINE_LIMIT {
+        if cmd_line.len() as u64 > KERNEL_CMD_LINE_LIMIT {
             return Err(Error::CmdLineTooLong(cmd_line.len(), KERNEL_CMD_LINE_LIMIT));
         }
-        memory.write_range(KERNEL_CMD_LINE_START, cmd_line.len(), cmd_line.as_bytes())?;
-        start_info_page.start_info.cmdline_paddr = KERNEL_CMD_LINE_START as u64;
+        memory.write_range(
+            KERNEL_CMD_LINE_START,
+            cmd_line.len() as u64,
+            cmd_line.as_bytes(),
+        )?;
+        start_info_page.start_info.cmdline_paddr = KERNEL_CMD_LINE_START;
     }

     // load initramfs
     let initramfs_range;
     if let Some(initramfs) = initramfs {
         let initramfs = File::open(initramfs)?;
-        let initramfs_size = initramfs.metadata()?.len() as usize;
+        let initramfs_size = initramfs.metadata()?.len();
         let initramfs_gpa = search_initramfs_address(mem_regions, initramfs_size, 2 << 30)?;
         let initramfs_end = initramfs_gpa + initramfs_size;
         memory.write_range(initramfs_gpa, initramfs_size, initramfs)?;
         start_info_page.start_info.nr_modules = 1;
         start_info_page.start_info.modlist_paddr =
-            (HVM_START_INFO_START + offset_of!(StartInfoPage, initramfs)) as u64;
+            HVM_START_INFO_START + offset_of!(StartInfoPage, initramfs) as u64;
         start_info_page.initramfs.paddr = initramfs_gpa as u64;
-        start_info_page.initramfs.size = initramfs_size as u64;
+        start_info_page.initramfs.size = initramfs_size;
         log::info!(
             "initramfs loaded at {:#x} - {:#x}, ",
             initramfs_gpa,
@@ -212,8 +216,8 @@ pub fn load<P: AsRef<Path>>(
             MemRegionType::Hidden => continue,
         };
         start_info_page.memory_map[index] = HvmMemmapTableEntry {
-            addr: *addr as u64,
-            size: region.size as u64,
+            addr: *addr,
+            size: region.size,
             type_,
             reserved: 0,
         };
@@ -221,7 +225,7 @@ pub fn load<P: AsRef<Path>>(
     }
     start_info_page.start_info.memmap_entries = index as u32;
     start_info_page.start_info.memmap_paddr =
-        (HVM_START_INFO_START + offset_of!(StartInfoPage, memory_map)) as u64;
+        HVM_START_INFO_START + offset_of!(StartInfoPage, memory_map) as u64;

     memory.write(HVM_START_INFO_START, &start_info_page)?;

@@ -259,7 +263,7 @@ pub fn load<P: AsRef<Path>>(
         boot_ldtr.to_desc(),
     ];
     let gdtr = DtRegVal {
-        base: BOOT_GDT_START as u64,
+        base: BOOT_GDT_START,
         limit: size_of_val(&gdt) as u16 - 1,
     };
     memory.write(BOOT_GDT_START, &gdt)?;
@@ -268,7 +272,7 @@ pub fn load<P: AsRef<Path>>(

     Ok(InitState {
         regs: vec![
-            (Reg::Rbx, HVM_START_INFO_START as u64),
+            (Reg::Rbx, HVM_START_INFO_START),
             (Reg::Rflags, Rflags::RESERVED_1.bits() as u64),
             (Reg::Rip, entry_point),
         ],
@@ -18,7 +18,7 @@ use crate::align_up;
 use crate::mem::{Error, Result};

 pub trait SlotBackend {
-    fn size(&self) -> usize;
+    fn size(&self) -> u64;
 }

 #[derive(Debug)]
@@ -26,7 +26,7 @@ struct Slot<B>
 where
     B: SlotBackend,
 {
-    addr: usize,
+    addr: u64,
     backend: B,
 }

@@ -34,7 +34,7 @@ impl<B> Slot<B>
 where
     B: SlotBackend,
 {
-    fn new(addr: usize, backend: B) -> Result<Self> {
+    fn new(addr: u64, backend: B) -> Result<Self> {
         if backend.size() == 0 {
             return Err(Error::ZeroSizedSlot);
         }
@@ -47,7 +47,7 @@ where
         }
     }

-    fn max_addr(&self) -> usize {
+    fn max_addr(&self) -> u64 {
         (self.backend.size() - 1) + self.addr
     }
 }
@@ -77,14 +77,14 @@ where
         Self::default()
     }

-    pub fn iter(&self) -> impl DoubleEndedIterator<Item = (usize, &B)> {
+    pub fn iter(&self) -> impl DoubleEndedIterator<Item = (u64, &B)> {
         self.slots.iter().map(|slot| (slot.addr, &slot.backend))
     }

     pub fn drain(
         &mut self,
         range: impl RangeBounds<usize>,
-    ) -> impl DoubleEndedIterator<Item = (usize, B)> + '_ {
+    ) -> impl DoubleEndedIterator<Item = (u64, B)> + '_ {
         self.slots.drain(range).map(|s| (s.addr, s.backend))
     }

@@ -92,7 +92,7 @@ where
         self.slots.is_empty()
     }

-    pub fn last(&self) -> Option<(usize, &B)> {
+    pub fn last(&self) -> Option<(u64, &B)> {
         self.slots.last().map(|slot| (slot.addr, &slot.backend))
     }
 }
@@ -101,7 +101,7 @@ impl<B> Addressable<B>
 where
     B: SlotBackend,
 {
-    pub fn add(&mut self, addr: usize, backend: B) -> Result<&mut B> {
+    pub fn add(&mut self, addr: u64, backend: B) -> Result<&mut B> {
         let slot = Slot::new(addr, backend)?;
         let result = match self.slots.binary_search_by_key(&addr, |s| s.addr) {
             Ok(index) => Err(&self.slots[index]),
@@ -128,13 +128,7 @@ where
         }
     }

-    pub fn add_within(
-        &mut self,
-        start: usize,
-        max: usize,
-        align: usize,
-        backend: B,
-    ) -> Result<usize> {
+    pub fn add_within(&mut self, start: u64, max: u64, align: u64, backend: B) -> Result<u64> {
         if backend.size() == 0 {
             return Err(Error::ZeroSizedSlot);
         }
@@ -173,14 +167,14 @@ where
         }
     }

-    pub fn remove(&mut self, addr: usize) -> Result<B> {
+    pub fn remove(&mut self, addr: u64) -> Result<B> {
         match self.slots.binary_search_by_key(&addr, |s| s.addr) {
             Ok(index) => Ok(self.slots.remove(index).backend),
             Err(_) => Err(Error::NotMapped(addr)),
         }
     }

-    pub fn search(&self, addr: usize) -> Option<(usize, &B)> {
+    pub fn search(&self, addr: u64) -> Option<(u64, &B)> {
         match self.slots.binary_search_by_key(&addr, |s| s.addr) {
             Ok(index) => Some((self.slots[index].addr, &self.slots[index].backend)),
             Err(0) => None,
@@ -195,7 +189,7 @@ where
         }
     }

-    pub fn search_next(&self, addr: usize) -> Option<(usize, &B)> {
+    pub fn search_next(&self, addr: u64) -> Option<(u64, &B)> {
         match self.slots.binary_search_by_key(&addr, |s| s.addr) {
             Ok(index) => Some((self.slots[index].addr, &self.slots[index].backend)),
             Err(0) => None,
@@ -219,11 +213,11 @@ mod test {

     #[derive(Debug, PartialEq)]
     struct Backend {
-        size: usize,
+        size: u64,
     }

     impl SlotBackend for Backend {
-        fn size(&self) -> usize {
+        fn size(&self) -> u64 {
             self.size
         }
     }
@@ -231,10 +225,10 @@ mod test {
     #[test]
     fn test_new_slot() {
         assert_matches!(
-            Slot::new(usize::MAX, Backend { size: 0x10 }),
+            Slot::new(u64::MAX, Backend { size: 0x10 }),
             Err(Error::OutOfRange {
                 size: 0x10,
-                addr: usize::MAX,
+                addr: u64::MAX,
             })
         );
         assert_matches!(Slot::new(0, Backend { size: 0 }), Err(Error::ZeroSizedSlot));
@@ -334,11 +328,11 @@ mod test {
         assert_matches!(memory.remove(0x2001), Err(Error::NotMapped(0x2001)));

         assert_matches!(
-            memory.add(0usize.wrapping_sub(0x2000), Backend { size: 0x2000 }),
+            memory.add(0u64.wrapping_sub(0x2000), Backend { size: 0x2000 }),
             Ok(_)
         );
         assert_matches!(
-            memory.add(0usize.wrapping_sub(0x1000), Backend { size: 0x1000 }),
+            memory.add(0u64.wrapping_sub(0x1000), Backend { size: 0x1000 }),
             Err(_)
         )
     }
@@ -351,13 +345,13 @@ mod test {
             .unwrap_err();

         assert_matches!(
-            memory.add_within(0xff0, usize::MAX, 0x1000, Backend { size: 0x1000 }),
+            memory.add_within(0xff0, u64::MAX, 0x1000, Backend { size: 0x1000 }),
             Ok(0x1000)
         );
         // slots: [0x1000, 0x1fff]

         assert_matches!(
-            memory.add_within(0, usize::MAX, 0x1000, Backend { size: 0x2000 }),
+            memory.add_within(0, u64::MAX, 0x1000, Backend { size: 0x2000 }),
             Ok(0x2000)
         );
         // slots: [0x1000, 0x1fff], [0x2000, 0x3fff]
@@ -367,20 +361,20 @@ mod test {
             .unwrap_err();

         assert_matches!(
-            memory.add_within(0, usize::MAX, 0x1000, Backend { size: 0x1000 }),
+            memory.add_within(0, u64::MAX, 0x1000, Backend { size: 0x1000 }),
             Ok(0)
         );
         // slots: [0, 0xfff], [0x1000, 0x1fff], [0x2000, 0x3fff]

         assert_matches!(
-            memory.add_within(0x5000, usize::MAX, 0x1000, Backend { size: 0x1000 }),
+            memory.add_within(0x5000, u64::MAX, 0x1000, Backend { size: 0x1000 }),
             Ok(0x5000)
         );
         // slots: [0, 0xfff], [0x1000, 0x1fff], [0x2000, 0x3fff],
         // [0x5000, 0x5fff]

         assert_matches!(
-            memory.add_within(0, usize::MAX, 0x4000, Backend { size: 0x1000 }),
+            memory.add_within(0, u64::MAX, 0x4000, Backend { size: 0x1000 }),
             Ok(0x4000)
         );
         // slots: [0, 0xfff], [0x1000, 0x1fff], [0x2000, 0x3fff],
@@ -388,8 +382,8 @@ mod test {

         assert_matches!(
             memory.add_within(
-                0usize.wrapping_sub(0x9000),
-                usize::MAX,
+                0u64.wrapping_sub(0x9000),
+                u64::MAX,
                 0x2000,
                 Backend { size: 0x1000 }
             ),
@@ -401,8 +395,8 @@ mod test {

         assert_matches!(
             memory.add_within(
-                0usize.wrapping_sub(0x4000),
-                usize::MAX,
+                0u64.wrapping_sub(0x4000),
+                u64::MAX,
                 0x1000,
                 Backend { size: 0x1000 }
             ),
@@ -415,15 +409,15 @@ mod test {

         memory
             .add_within(
-                0usize.wrapping_sub(0x9000),
-                usize::MAX,
+                0u64.wrapping_sub(0x9000),
+                u64::MAX,
                 0x1000,
                 Backend { size: 0x4000 },
             )
             .unwrap_err();

         memory
-            .add_within(usize::MAX - 1, usize::MAX, 0x1000, Backend { size: 0x1000 })
+            .add_within(u64::MAX - 1, u64::MAX, 0x1000, Backend { size: 0x1000 })
             .unwrap_err();
     }
 }
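Note on `max_addr` above: it is written `(size - 1) + addr` rather than `addr + size - 1` so that a slot ending exactly at the top of the 64-bit address space does not overflow, which the wrapping tests above rely on. In isolation (a sketch):

    // A slot's last valid address, computed without overflow for slots
    // that end exactly at u64::MAX.
    fn max_addr(addr: u64, size: u64) -> u64 {
        (size - 1) + addr // addr + size would overflow for the topmost slot
    }

    fn main() {
        let addr = 0u64.wrapping_sub(0x2000); // the slot the test above adds
        assert_eq!(max_addr(addr, 0x2000), u64::MAX);
    }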
@@ -20,29 +20,29 @@ use crate::mem::Result;
 use parking_lot::RwLock;

 pub trait Mmio: Debug + Send + Sync + 'static {
-    fn read(&self, offset: usize, size: u8) -> Result<u64>;
-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<()>;
-    fn size(&self) -> usize;
+    fn read(&self, offset: u64, size: u8) -> Result<u64>;
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<()>;
+    fn size(&self) -> u64;
 }

 pub type MmioRange = Arc<dyn Mmio>;

 impl Mmio for MmioRange {
-    fn read(&self, offset: usize, size: u8) -> Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> Result<u64> {
         Mmio::read(self.as_ref(), offset, size)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<()> {
         Mmio::write(self.as_ref(), offset, size, val)
     }

-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         Mmio::size(self.as_ref())
     }
 }

 impl SlotBackend for MmioRange {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         Mmio::size(self.as_ref())
     }
 }
@@ -51,12 +51,13 @@ impl SlotBackend for MmioRange {
 macro_rules! impl_mmio_for_zerocopy {
     ($ty:ident) => {
         impl $crate::mem::emulated::Mmio for $ty {
-            fn size(&self) -> usize {
-                ::core::mem::size_of::<Self>()
+            fn size(&self) -> u64 {
+                ::core::mem::size_of::<Self>() as u64
             }

-            fn read(&self, offset: usize, size: u8) -> $crate::mem::Result<u64> {
+            fn read(&self, offset: u64, size: u8) -> $crate::mem::Result<u64> {
                 let bytes = AsBytes::as_bytes(self);
+                let offset = offset as usize;
                 let val = match size {
                     1 => bytes.get(offset).map(|b| *b as u64),
                     2 => u16::read_from_prefix(&bytes[offset..]).map(|w| w as u64),
@@ -75,7 +76,7 @@ macro_rules! impl_mmio_for_zerocopy {
                 }
             }

-            fn write(&self, offset: usize, size: u8, val: u64) -> $crate::mem::Result<()> {
+            fn write(&self, offset: u64, size: u8, val: u64) -> $crate::mem::Result<()> {
                 ::log::error!(
                     "{}: write 0x{val:0width$x} to readonly offset 0x{offset:x}.",
                     ::core::any::type_name::<Self>(),
@@ -118,18 +119,18 @@ where
         self.inner.read().is_empty()
     }

-    pub fn add(&self, addr: usize, range: R) -> Result<()> {
+    pub fn add(&self, addr: u64, range: R) -> Result<()> {
         let mut inner = self.inner.write();
         inner.add(addr, range)?;
         Ok(())
     }

-    pub(super) fn remove(&self, addr: usize) -> Result<R> {
+    pub(super) fn remove(&self, addr: u64) -> Result<R> {
         let mut inner = self.inner.write();
         inner.remove(addr)
     }

-    pub fn read(&self, addr: usize, size: u8) -> Result<u64> {
+    pub fn read(&self, addr: u64, size: u8) -> Result<u64> {
         let inner = self.inner.read();
         match inner.search(addr) {
             Some((start, dev)) => dev.read(addr - start, size),
@@ -137,7 +138,7 @@ where
         }
     }

-    pub fn write(&self, addr: usize, size: u8, val: u64) -> Result<()> {
+    pub fn write(&self, addr: u64, size: u8, val: u64) -> Result<()> {
         let inner = self.inner.read();
         match inner.search(addr) {
             Some((start, dev)) => dev.write(addr - start, size, val),
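For reference, a minimal device against the updated `Mmio` shape (a standalone sketch with a stand-in `Result`, not a device from the tree): offsets arrive as u64 and are narrowed to usize only where indexing requires it.

    use std::fmt::Debug;

    type Result<T> = std::result::Result<T, ()>; // stand-in for mem::Result

    trait Mmio: Debug + Send + Sync + 'static {
        fn read(&self, offset: u64, size: u8) -> Result<u64>;
        fn write(&self, offset: u64, size: u8, val: u64) -> Result<()>;
        fn size(&self) -> u64;
    }

    #[derive(Debug)]
    struct Scratch([u8; 8]); // trivial 8-byte read-only region

    impl Mmio for Scratch {
        fn size(&self) -> u64 {
            self.0.len() as u64
        }
        fn read(&self, offset: u64, _size: u8) -> Result<u64> {
            // Narrow to usize only at the indexing boundary.
            self.0.get(offset as usize).map(|b| *b as u64).ok_or(())
        }
        fn write(&self, _offset: u64, _size: u8, _val: u64) -> Result<()> {
            Ok(()) // writes ignored in this sketch
        }
    }

    fn main() {
        let dev = Scratch(*b"alioth!!");
        assert_eq!(dev.size(), 8);
        assert_eq!(dev.read(0, 1).unwrap(), b'a' as u64);
    }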
@ -74,8 +74,8 @@ pub struct ArcMemPages {
|
|||
}
|
||||
|
||||
impl SlotBackend for ArcMemPages {
|
||||
fn size(&self) -> usize {
|
||||
self.size
|
||||
fn size(&self) -> u64 {
|
||||
self.size as u64
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -84,8 +84,8 @@ impl ArcMemPages {
|
|||
self.addr
|
||||
}
|
||||
|
||||
pub fn size(&self) -> usize {
|
||||
self.size
|
||||
pub fn size(&self) -> u64 {
|
||||
self.size as u64
|
||||
}
|
||||
|
||||
pub fn fd(&self) -> Option<BorrowedFd> {
|
||||
|
@ -143,8 +143,8 @@ impl ArcMemPages {
|
|||
let end = offset.wrapping_add(len).wrapping_sub(1);
|
||||
if offset >= self.size || end < offset {
|
||||
return Err(Error::OutOfRange {
|
||||
addr: offset,
|
||||
size: len,
|
||||
addr: offset as _,
|
||||
size: len as _,
|
||||
});
|
||||
}
|
||||
let valid_len = std::cmp::min(self.size - offset, len);
|
||||
|
@ -158,8 +158,8 @@ impl ArcMemPages {
|
|||
let s = self.get_partial_slice(offset, size_of::<T>())?;
|
||||
match FromBytes::read_from(s) {
|
||||
None => Err(Error::OutOfRange {
|
||||
addr: offset,
|
||||
size: size_of::<T>(),
|
||||
addr: offset as _,
|
||||
size: size_of::<T>() as _,
|
||||
}),
|
||||
Some(v) => Ok(v),
|
||||
}
|
||||
|
@ -172,8 +172,8 @@ impl ArcMemPages {
|
|||
let s = self.get_partial_slice_mut(offset, size_of::<T>())?;
|
||||
match AsBytes::write_to(val, s) {
|
||||
None => Err(Error::OutOfRange {
|
||||
addr: offset,
|
||||
size: size_of::<T>(),
|
||||
addr: offset as _,
|
||||
size: size_of::<T>() as _,
|
||||
}),
|
||||
Some(()) => Ok(()),
|
||||
}
|
||||
|
@ -207,8 +207,8 @@ pub struct MappedSlot {
|
|||
}
|
||||
|
||||
impl SlotBackend for MappedSlot {
|
||||
fn size(&self) -> usize {
|
||||
self.pages.size
|
||||
fn size(&self) -> u64 {
|
||||
self.pages.size as u64
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -234,8 +234,8 @@ impl Deref for RamLayoutGuard<'_> {
|
|||
|
||||
struct Iter<'a> {
|
||||
inner: &'a Addressable<MappedSlot>,
|
||||
gpa: usize,
|
||||
remain: usize,
|
||||
gpa: u64,
|
||||
remain: u64,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for Iter<'a> {
|
||||
|
@ -246,8 +246,8 @@ impl<'a> Iterator for Iter<'a> {
|
|||
}
|
||||
let r = self.inner.get_partial_slice(self.gpa, self.remain);
|
||||
if let Ok(s) = r {
|
||||
self.gpa += s.len();
|
||||
self.remain -= s.len();
|
||||
self.gpa += s.len() as u64;
|
||||
self.remain -= s.len() as u64;
|
||||
}
|
||||
Some(r)
|
||||
}
|
||||
|
@ -255,8 +255,8 @@ impl<'a> Iterator for Iter<'a> {
|
|||
|
||||
struct IterMut<'a> {
|
||||
inner: &'a Addressable<MappedSlot>,
|
||||
gpa: usize,
|
||||
remain: usize,
|
||||
gpa: u64,
|
||||
remain: u64,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for IterMut<'a> {
|
||||
|
@ -267,15 +267,15 @@ impl<'a> Iterator for IterMut<'a> {
|
|||
}
|
||||
let r = self.inner.get_partial_slice_mut(self.gpa, self.remain);
|
||||
if let Ok(ref s) = r {
|
||||
self.gpa += s.len();
|
||||
self.remain -= s.len();
|
||||
self.gpa += s.len() as u64;
|
||||
self.remain -= s.len() as u64;
|
||||
}
|
||||
Some(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl Addressable<MappedSlot> {
|
||||
fn slice_iter(&self, gpa: usize, len: usize) -> Iter {
|
||||
fn slice_iter(&self, gpa: u64, len: u64) -> Iter {
|
||||
Iter {
|
||||
inner: self,
|
||||
gpa,
|
||||
|
@ -283,7 +283,7 @@ impl Addressable<MappedSlot> {
|
|||
}
|
||||
}
|
||||
|
||||
fn slice_iter_mut(&self, gpa: usize, len: usize) -> IterMut {
|
||||
fn slice_iter_mut(&self, gpa: u64, len: u64) -> IterMut {
|
||||
IterMut {
|
||||
inner: self,
|
||||
gpa,
|
||||
|
@ -291,36 +291,40 @@ impl Addressable<MappedSlot> {
|
|||
}
|
||||
}
|
||||
|
||||
fn get_partial_slice(&self, gpa: usize, len: usize) -> Result<&[u8]> {
|
||||
fn get_partial_slice(&self, gpa: u64, len: u64) -> Result<&[u8]> {
|
||||
let Some((start, user_mem)) = self.search(gpa) else {
|
||||
return Err(Error::NotMapped(gpa));
|
||||
};
|
||||
user_mem.pages.get_partial_slice(gpa - start, len)
|
||||
user_mem
|
||||
.pages
|
||||
.get_partial_slice((gpa - start) as usize, len as usize)
|
||||
}
|
||||
|
||||
fn get_partial_slice_mut(&self, gpa: usize, len: usize) -> Result<&mut [u8]> {
|
||||
fn get_partial_slice_mut(&self, gpa: u64, len: u64) -> Result<&mut [u8]> {
|
||||
let Some((start, user_mem)) = self.search(gpa) else {
|
||||
return Err(Error::NotMapped(gpa));
|
||||
};
|
||||
user_mem.pages.get_partial_slice_mut(gpa - start, len)
|
||||
user_mem
|
||||
.pages
|
||||
.get_partial_slice_mut((gpa - start) as usize, len as usize)
|
||||
}
|
||||
|
||||
pub fn get_slice<T>(&self, gpa: usize, len: usize) -> Result<&[UnsafeCell<T>], Error> {
|
||||
let total_len = len * size_of::<T>();
|
||||
pub fn get_slice<T>(&self, gpa: u64, len: u64) -> Result<&[UnsafeCell<T>], Error> {
|
||||
let total_len = len * size_of::<T>() as u64;
|
||||
let host_ref = self.get_partial_slice(gpa, total_len)?;
|
||||
let ptr = host_ref.as_ptr() as *const UnsafeCell<T>;
|
||||
if host_ref.len() != total_len {
|
||||
if host_ref.len() as u64 != total_len {
|
||||
Err(Error::NotContinuous)
|
||||
} else if ptr as usize & (align_of::<T>() - 1) != 0 {
|
||||
// TODO: use is_aligned
|
||||
Err(Error::NotAligned)
|
||||
} else {
|
||||
Ok(unsafe { &*core::ptr::slice_from_raw_parts(ptr, len) })
|
||||
Ok(unsafe { &*core::ptr::slice_from_raw_parts(ptr, len as usize) })
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_ref<T>(&self, gpa: usize) -> Result<&UnsafeCell<T>, Error> {
|
||||
let host_ref = self.get_partial_slice(gpa, size_of::<T>())?;
|
||||
pub fn get_ref<T>(&self, gpa: u64) -> Result<&UnsafeCell<T>, Error> {
|
||||
let host_ref = self.get_partial_slice(gpa, size_of::<T>() as u64)?;
|
||||
let ptr = host_ref.as_ptr() as *const UnsafeCell<T>;
|
||||
if host_ref.len() != size_of::<T>() {
|
||||
Err(Error::NotContinuous)
|
||||
|
@ -332,19 +336,19 @@ impl Addressable<MappedSlot> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn read<T>(&self, gpa: usize) -> Result<T, Error>
|
||||
pub fn read<T>(&self, gpa: u64) -> Result<T, Error>
|
||||
where
|
||||
T: FromBytes + AsBytes,
|
||||
{
|
||||
let mut val = T::new_zeroed();
|
||||
let buf = val.as_bytes_mut();
|
||||
let host_ref = self.get_partial_slice(gpa, size_of::<T>())?;
|
||||
let host_ref = self.get_partial_slice(gpa, size_of::<T>() as u64)?;
|
||||
if host_ref.len() == buf.len() {
|
||||
buf.copy_from_slice(host_ref);
|
||||
Ok(val)
|
||||
} else {
|
||||
let mut cur = 0;
|
||||
for r in self.slice_iter(gpa, size_of::<T>()) {
|
||||
for r in self.slice_iter(gpa, size_of::<T>() as u64) {
|
||||
let s = r?;
|
||||
let s_len = s.len();
|
||||
buf[cur..(cur + s_len)].copy_from_slice(s);
|
||||
|
@ -354,18 +358,18 @@ impl Addressable<MappedSlot> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn write<T>(&self, gpa: usize, val: &T) -> Result<(), Error>
|
||||
pub fn write<T>(&self, gpa: u64, val: &T) -> Result<(), Error>
|
||||
where
|
||||
T: AsBytes,
|
||||
{
|
||||
let buf = val.as_bytes();
|
||||
let host_ref = self.get_partial_slice_mut(gpa, size_of::<T>())?;
|
||||
let host_ref = self.get_partial_slice_mut(gpa, size_of::<T>() as u64)?;
|
||||
if host_ref.len() == buf.len() {
|
||||
host_ref.copy_from_slice(buf);
|
||||
Ok(())
|
||||
} else {
|
||||
let mut cur = 0;
|
||||
for r in self.slice_iter_mut(gpa, size_of::<T>()) {
|
||||
for r in self.slice_iter_mut(gpa, size_of::<T>() as u64) {
|
||||
let s = r?;
|
||||
let s_len = s.len();
|
||||
s.copy_from_slice(&buf[cur..(cur + s_len)]);
|
||||
|
@ -375,12 +379,12 @@ impl Addressable<MappedSlot> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn translate(&self, gpa: usize) -> Result<*const u8> {
|
||||
pub fn translate(&self, gpa: u64) -> Result<*const u8> {
|
||||
let s = self.get_partial_slice(gpa, 1)?;
|
||||
Ok(s.as_ptr())
|
||||
}
|
||||
|
||||
pub fn translate_iov<'a>(&'a self, iov: &[(usize, usize)]) -> Result<Vec<IoSlice<'a>>> {
|
||||
pub fn translate_iov<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSlice<'a>>> {
|
||||
let mut slices = vec![];
|
||||
for (gpa, len) in iov {
|
||||
for r in self.slice_iter(*gpa, *len) {
|
||||
|
@ -390,7 +394,7 @@ impl Addressable<MappedSlot> {
|
|||
Ok(slices)
|
||||
}
|
||||
|
||||
pub fn translate_iov_mut<'a>(&'a self, iov: &[(usize, usize)]) -> Result<Vec<IoSliceMut<'a>>> {
|
||||
pub fn translate_iov_mut<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSliceMut<'a>>> {
|
||||
let mut slices = vec![];
|
||||
for (gpa, len) in iov {
|
||||
for r in self.slice_iter_mut(*gpa, *len) {
|
||||
|
@ -438,7 +442,7 @@ impl RamBus {
|
|||
}
|
||||
}
|
||||
|
||||
fn map_to_vm(&self, user_mem: &MappedSlot, addr: usize) -> Result<(), Error> {
|
||||
fn map_to_vm(&self, user_mem: &MappedSlot, addr: u64) -> Result<(), Error> {
|
||||
let mem_options = MemMapOption {
|
||||
read: true,
|
||||
write: true,
|
||||
|
@ -448,7 +452,7 @@ impl RamBus {
|
|||
self.vm_memory.mem_map(
|
||||
user_mem.slot_id,
|
||||
addr,
|
||||
-            user_mem.pages.size,
+            user_mem.pages.size as u64,
             user_mem.pages.addr,
             mem_options,
         )?;

@@ -461,7 +465,7 @@ impl RamBus {
         Ok(())
     }

-    fn unmap_from_vm(&self, user_mem: &MappedSlot, addr: usize) -> Result<(), Error> {
+    fn unmap_from_vm(&self, user_mem: &MappedSlot, addr: u64) -> Result<(), Error> {
         self.vm_memory
             .unmap(user_mem.slot_id, addr, user_mem.size())?;
         log::trace!(
@@ -473,7 +477,7 @@ impl RamBus {
         Ok(())
     }

-    pub(crate) fn add(&self, gpa: usize, user_mem: ArcMemPages) -> Result<(), Error> {
+    pub(crate) fn add(&self, gpa: u64, user_mem: ArcMemPages) -> Result<(), Error> {
         let mut inner = self.inner.write();
         let slot = MappedSlot {
             slot_id: self.next_slot_id.fetch_add(1, Ordering::AcqRel) % self.max_mem_slots,
@@ -492,14 +496,14 @@ impl RamBus {
         Ok(())
     }

-    pub(super) fn remove(&self, gpa: usize) -> Result<ArcMemPages, Error> {
+    pub(super) fn remove(&self, gpa: u64) -> Result<ArcMemPages, Error> {
         let mut inner = self.inner.write();
         let mem = inner.remove(gpa)?;
         self.unmap_from_vm(&mem, gpa)?;
         Ok(mem.pages)
     }

-    pub fn read<T>(&self, gpa: usize) -> Result<T, Error>
+    pub fn read<T>(&self, gpa: u64) -> Result<T, Error>
     where
         T: FromBytes + AsBytes,
     {
@@ -507,7 +511,7 @@ impl RamBus {
         inner.read(gpa)
     }

-    pub fn write<T>(&self, gpa: usize, val: &T) -> Result<(), Error>
+    pub fn write<T>(&self, gpa: u64, val: &T) -> Result<(), Error>
     where
         T: AsBytes,
     {
@@ -515,7 +519,7 @@ impl RamBus {
         inner.write(gpa, val)
     }

-    pub fn read_range(&self, gpa: usize, len: usize, dst: &mut impl Write) -> Result<()> {
+    pub fn read_range(&self, gpa: u64, len: u64, dst: &mut impl Write) -> Result<()> {
         let inner = self.inner.read();
         for r in inner.slice_iter(gpa, len) {
             dst.write_all(r?)?;
@@ -523,7 +527,7 @@ impl RamBus {
         Ok(())
     }

-    pub fn write_range(&self, gpa: usize, len: usize, mut src: impl Read) -> Result<()> {
+    pub fn write_range(&self, gpa: u64, len: u64, mut src: impl Read) -> Result<()> {
         let inner = self.inner.read();
         for r in inner.slice_iter_mut(gpa, len) {
             src.read_exact(r?)?;
@@ -531,7 +535,7 @@ impl RamBus {
         Ok(())
     }

-    pub fn read_vectored<T, F>(&self, bufs: &[(usize, usize)], callback: F) -> Result<T, Error>
+    pub fn read_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>
     where
         F: FnOnce(&[IoSlice<'_>]) -> T,
     {
@@ -545,7 +549,7 @@ impl RamBus {
         Ok(callback(&iov))
     }

-    pub fn write_vectored<T, F>(&self, bufs: &[(usize, usize)], callback: F) -> Result<T, Error>
+    pub fn write_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>
     where
         F: FnOnce(&mut [IoSliceMut<'_>]) -> T,
     {
@@ -563,9 +567,9 @@ impl RamBus {
         let inner = self.inner.read();
         let mut start = gpa;
         let end = gpa + size;
-        while let Some((addr, slot)) = inner.search_next(start as usize) {
-            let gpa_start = std::cmp::max(addr as u64, start);
-            let gpa_end = std::cmp::min(end, (addr + slot.size()) as u64);
+        while let Some((addr, slot)) = inner.search_next(start) {
+            let gpa_start = std::cmp::max(addr, start);
+            let gpa_end = std::cmp::min(end, addr + slot.size());
             if gpa_start >= gpa_end {
                 break;
             }
@@ -610,14 +614,14 @@ mod test {
         data: [u32; 8],
     }

-    const PAGE_SIZE: usize = 1 << 12;
+    const PAGE_SIZE: u64 = 1 << 12;

     #[test]
     fn test_ram_bus_read() {
         let bus = RamBus::new(FakeVmMemory);
         let prot = PROT_READ | PROT_WRITE;
-        let mem1 = ArcMemPages::from_anonymous(PAGE_SIZE, Some(prot)).unwrap();
-        let mem2 = ArcMemPages::from_anonymous(PAGE_SIZE, Some(prot)).unwrap();
+        let mem1 = ArcMemPages::from_anonymous(PAGE_SIZE as usize, Some(prot)).unwrap();
+        let mem2 = ArcMemPages::from_anonymous(PAGE_SIZE as usize, Some(prot)).unwrap();

         if mem1.addr > mem2.addr {
             bus.add(0x0, mem1).unwrap();
@@ -630,7 +634,7 @@ mod test {
         let data = MyStruct {
             data: [1, 2, 3, 4, 5, 6, 7, 8],
         };
-        let data_size = size_of::<MyStruct>();
+        let data_size = size_of::<MyStruct>() as u64;
         for gpa in (PAGE_SIZE - data_size)..=PAGE_SIZE {
             bus.write(gpa, &data).unwrap();
             let r: MyStruct = bus.read(gpa).unwrap();
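With gpa typed as u64 throughout RamBus, guest-physical addresses stay intact even on a 32-bit host, where usize is only 32 bits wide. A minimal standalone sketch (not alioth's API; the sample address is arbitrary) of the truncation a usize-based signature risks:

fn main() {
    let gpa: u64 = 0x1_0000_1000; // 4 GiB + 4 KiB, representable as u64 everywhere
    // On a 32-bit host the old usize parameters would truncate; `as u32`
    // stands in here for that 32-bit `as usize` conversion.
    let truncated = gpa as u32;
    assert_eq!(truncated, 0x1000); // the bit above 4 GiB is silently lost
    println!("gpa = {gpa:#x}, truncated = {truncated:#x}");
}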
@@ -45,11 +45,11 @@ pub trait ChangeLayout: Debug + Send + Sync + 'static {
 pub enum Error {
     #[error("{new_item:#x?} overlaps with {exist_item:#x?}")]
     Overlap {
-        new_item: [usize; 2],
-        exist_item: [usize; 2],
+        new_item: [u64; 2],
+        exist_item: [u64; 2],
     },
     #[error("(addr={addr:#x}, size={size:#x}) is out of range")]
-    OutOfRange { addr: usize, size: usize },
+    OutOfRange { addr: u64, size: u64 },
     #[error("io: {source:#x?}")]
     Io {
         #[from]
@@ -58,9 +58,9 @@ pub enum Error {
     #[error("mmap: {0}")]
     Mmap(#[source] std::io::Error),
     #[error("offset {offset:#x} exceeds limit {limit:#x}")]
-    ExceedLimit { offset: usize, limit: usize },
+    ExceedLimit { offset: u64, limit: u64 },
     #[error("{0:#x} is not mapped")]
-    NotMapped(usize),
+    NotMapped(u64),
     #[error("zero memory size")]
     ZeroMemorySize,
     #[error("lock poisoned")]
@@ -89,11 +89,11 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
 pub enum MemRange {
     Mapped(ArcMemPages),
     Emulated(MmioRange),
-    Span(usize),
+    Span(u64),
 }

 impl MemRange {
-    pub fn size(&self) -> usize {
+    pub fn size(&self) -> u64 {
         match self {
             MemRange::Mapped(pages) => pages.size(),
             MemRange::Emulated(range) => range.size(),
@@ -113,12 +113,12 @@ pub enum MemRegionType {

 #[derive(Debug, Clone, Copy)]
 pub struct MemRegionEntry {
-    pub size: usize,
+    pub size: u64,
     pub type_: MemRegionType,
 }

 pub trait MemRegionCallback: Debug + Send + Sync + 'static {
-    fn mapped(&self, addr: usize) -> Result<()>;
+    fn mapped(&self, addr: u64) -> Result<()>;
     fn unmapped(&self) -> Result<()> {
         log::debug!("{} unmapped", type_name::<Self>());
         Ok(())
@@ -127,7 +127,7 @@ pub trait MemRegionCallback: Debug + Send + Sync + 'static {

 #[derive(Debug)]
 pub struct MemRegion {
-    pub size: usize,
+    pub size: u64,
     pub ranges: Vec<MemRange>,
     pub entries: Vec<MemRegionEntry>,
     pub callbacks: Mutex<Vec<Box<dyn MemRegionCallback>>>,
@@ -168,7 +168,7 @@ impl MemRegion {
 }

 impl SlotBackend for Arc<MemRegion> {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         self.size
     }
 }
@@ -189,7 +189,7 @@ impl IoRegion {
 }

 impl SlotBackend for Arc<IoRegion> {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         self.range.size()
     }
 }
@@ -197,7 +197,7 @@ impl SlotBackend for Arc<IoRegion> {
 #[derive(Debug, Clone, Copy)]
 pub enum AddrOpt {
     Any,
-    Fixed(usize),
+    Fixed(u64),
     Below4G,
     Above4G,
 }
@@ -236,7 +236,7 @@ impl Memory {
         regions: &mut Addressable<Arc<MemRegion>>,
         addr: AddrOpt,
         region: Arc<MemRegion>,
-    ) -> Result<usize> {
+    ) -> Result<u64> {
         match addr {
             AddrOpt::Fixed(addr) => {
                 let _region = regions.add(addr, region)?;
@@ -244,7 +244,7 @@ impl Memory {
             }
             AddrOpt::Any | AddrOpt::Above4G => {
                 let align = std::cmp::max(region.size.next_power_of_two(), PAGE_SIZE);
-                regions.add_within(MEM_64_START, usize::MAX, align, region)
+                regions.add_within(MEM_64_START, u64::MAX, align, region)
             }
             AddrOpt::Below4G => {
                 let align = std::cmp::max(region.size.next_power_of_two(), PAGE_SIZE);
@@ -253,7 +253,7 @@ impl Memory {
         }
     }

-    pub fn add_region(&self, addr: AddrOpt, region: Arc<MemRegion>) -> Result<usize> {
+    pub fn add_region(&self, addr: AddrOpt, region: Arc<MemRegion>) -> Result<u64> {
         region.validate()?;
         let mut regions = self.regions.lock();
         let addr = Self::alloc(&mut regions, addr, region.clone())?;
@@ -273,7 +273,7 @@ impl Memory {
         Ok(addr)
     }

-    fn unmap_region(&self, addr: usize, region: &MemRegion) -> Result<()> {
+    fn unmap_region(&self, addr: u64, region: &MemRegion) -> Result<()> {
         let mut offset = 0;
         for range in &region.ranges {
             match range {
@@ -294,7 +294,7 @@ impl Memory {
         Ok(())
     }

-    pub fn remove_region(&self, addr: usize) -> Result<Arc<MemRegion>> {
+    pub fn remove_region(&self, addr: u64) -> Result<Arc<MemRegion>> {
         let mut regions = self.regions.lock();
         let region = regions.remove(addr)?;
         self.unmap_region(addr, &region)?;
@@ -319,7 +319,7 @@ impl Memory {
         Ok(())
     }

-    pub fn mem_region_entries(&self) -> Vec<(usize, MemRegionEntry)> {
+    pub fn mem_region_entries(&self) -> Vec<(u64, MemRegionEntry)> {
         let mut entries = vec![];
         let regions = self.regions.lock();
         for (start, region) in regions.iter() {
@@ -339,18 +339,18 @@ impl Memory {
     pub fn add_io_region(&self, port: Option<u16>, region: Arc<IoRegion>) -> Result<u16, Error> {
         let mut regions = self.io_regions.lock();
         // TODO: allocate port dynamically
-        regions.add(port.unwrap() as usize, region.clone())?;
+        regions.add(port.unwrap() as u64, region.clone())?;
         self.io_bus
-            .add(port.unwrap() as usize, region.range.clone())?;
+            .add(port.unwrap() as u64, region.range.clone())?;
         let callbacks = region.callbacks.lock();
         for callback in callbacks.iter() {
-            callback.mapped(port.unwrap() as usize)?;
+            callback.mapped(port.unwrap() as u64)?;
         }
         Ok(port.unwrap())
     }

     fn unmap_io_region(&self, port: u16, region: &IoRegion) -> Result<()> {
-        self.io_bus.remove(port as usize)?;
+        self.io_bus.remove(port as u64)?;
         let callbacks = region.callbacks.lock();
         for callback in callbacks.iter() {
             callback.unmapped()?;
@@ -360,7 +360,7 @@ impl Memory {

     pub fn remove_io_region(&self, port: u16) -> Result<Arc<IoRegion>> {
         let mut io_regions = self.io_regions.lock();
-        let io_region = io_regions.remove(port as usize)?;
+        let io_region = io_regions.remove(port as u64)?;
         self.unmap_io_region(port, &io_region)?;
         Ok(io_region)
     }
@@ -385,7 +385,7 @@ impl Memory {
         }
     }

-    pub fn handle_mmio(&self, gpa: usize, write: Option<u64>, size: u8) -> Result<VmEntry> {
+    pub fn handle_mmio(&self, gpa: u64, write: Option<u64>, size: u8) -> Result<VmEntry> {
         if let Some(val) = write {
             match self.mmio_bus.write(gpa, size, val) {
                 Ok(()) => Ok(VmEntry::None),
@@ -410,13 +410,13 @@ impl Memory {
             return Ok(VmEntry::Reboot);
         }
         if let Some(val) = write {
-            match self.io_bus.write(port as usize, size, val as u64) {
+            match self.io_bus.write(port as u64, size, val as u64) {
                 Ok(()) => Ok(VmEntry::None),
                 Err(Error::Action(action)) => self.handle_action(action),
                 Err(e) => Err(e),
             }
         } else {
-            let data = self.io_bus.read(port as usize, size)? as u32;
+            let data = self.io_bus.read(port as u64, size)? as u32;
             Ok(VmEntry::Io { data })
         }
     }
@@ -434,17 +434,17 @@ mod test {
     fn test_memory_add_remove() {
         #[derive(Debug)]
         struct TestMmio {
-            size: usize,
+            size: u64,
         }

         impl Mmio for TestMmio {
-            fn read(&self, _offset: usize, _size: u8) -> Result<u64> {
+            fn read(&self, _offset: u64, _size: u8) -> Result<u64> {
                 Ok(0)
             }
-            fn write(&self, _offset: usize, _size: u8, _val: u64) -> Result<()> {
+            fn write(&self, _offset: u64, _size: u8, _val: u64) -> Result<()> {
                 Ok(())
             }
-            fn size(&self) -> usize {
+            fn size(&self) -> u64 {
                 self.size
             }
         }
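The allocation arms of Memory::alloc align a region to max(size.next_power_of_two(), PAGE_SIZE), now in u64. A minimal sketch of that rule; align_up is a hypothetical helper added here only to show the effect of the alignment:

const PAGE_SIZE: u64 = 0x1000; // 4 KiB, mirrors the arch constant

// Alignment rule from Memory::alloc: at least one page, otherwise the
// region size rounded up to the next power of two.
fn region_align(size: u64) -> u64 {
    std::cmp::max(size.next_power_of_two(), PAGE_SIZE)
}

// Hypothetical helper: round addr up to a power-of-two alignment.
fn align_up(addr: u64, align: u64) -> u64 {
    (addr + align - 1) & !(align - 1) // valid because align is a power of two
}

fn main() {
    let align = region_align(0x3000); // a 12 KiB region aligns to 16 KiB
    assert_eq!(align, 0x4000);
    assert_eq!(align_up(0x1_0000_0100, align), 0x1_0000_4000);
}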
@@ -42,8 +42,8 @@ bitfield! {
 }

 impl Address {
-    pub fn to_ecam_addr(self) -> usize {
-        let v = self.0 as usize;
+    pub fn to_ecam_addr(self) -> u64 {
+        let v = self.0 as u64;
         ((v & 0xff_ff00) << 4) | (v & 0xfc)
     }
 }
@@ -55,11 +55,11 @@ pub struct PciIoBus {
 }

 impl Mmio for PciIoBus {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         8
     }

-    fn read(&self, offset: usize, size: u8) -> Result<u64, mem::Error> {
+    fn read(&self, offset: u64, size: u8) -> Result<u64, mem::Error> {
         match offset {
             0 => {
                 assert_eq!(size, 4);
@@ -74,7 +74,7 @@ impl Mmio for PciIoBus {
         }
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<(), mem::Error> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<(), mem::Error> {
         match offset {
             0 => {
                 assert_eq!(size, 4);
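Address::to_ecam_addr shifts the bus/device/function bits of a legacy 0xCF8-style config address up by 4 so they land at their ECAM positions, keeping the dword-aligned register offset in place. A small self-contained check of that formula (the sample BDF values are arbitrary):

// Same formula as Address::to_ecam_addr above, lifted out for a worked example.
fn to_ecam_addr(v: u32) -> u64 {
    let v = v as u64;
    ((v & 0xff_ff00) << 4) | (v & 0xfc)
}

fn main() {
    // Legacy layout: bus[23:16], device[15:11], function[10:8], register[7:2].
    let cf8 = (1u32 << 16) | (2 << 11) | (3 << 8) | 0x10; // bus 1, dev 2, fn 3
    // ECAM layout: bus[27:20], device[19:15], function[14:12], register[11:0].
    assert_eq!(to_ecam_addr(cf8), (1u64 << 20) | (2 << 15) | (3 << 12) | 0x10);
}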
@@ -129,21 +129,21 @@ pub trait PciCap: Mmio {
 }

 impl SlotBackend for Box<dyn PciCap> {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         Mmio::size(self.as_ref())
     }
 }

 impl Mmio for Box<dyn PciCap> {
-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
         Mmio::read(self.as_ref(), offset, size)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
         Mmio::write(self.as_ref(), offset, size, val)
     }

-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         Mmio::size(self.as_ref())
     }
 }
@@ -172,15 +172,15 @@ impl PciCapList {
 }

 impl Mmio for PciCapList {
-    fn read(&self, offset: usize, size: u8) -> Result<u64, mem::Error> {
+    fn read(&self, offset: u64, size: u8) -> Result<u64, mem::Error> {
         self.inner.read(offset, size)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<(), mem::Error> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<(), mem::Error> {
         self.inner.write(offset, size, val)
     }

-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         4096
     }
 }
@@ -189,7 +189,7 @@ impl TryFrom<Vec<Box<dyn PciCap>>> for PciCapList {
     type Error = Error;
     fn try_from(caps: Vec<Box<dyn PciCap>>) -> Result<Self, Self::Error> {
         let bus = MmioBus::new();
-        let mut ptr = size_of::<DeviceHeader>();
+        let mut ptr = size_of::<DeviceHeader>() as u64;
         let num_caps = caps.len();
         for (index, mut cap) in caps.into_iter().enumerate() {
             let next = if index == num_caps - 1 {
@@ -211,16 +211,16 @@ pub struct MsixCapMmio {
 }

 impl Mmio for MsixCapMmio {
-    fn size(&self) -> usize {
-        size_of::<MsixCap>()
+    fn size(&self) -> u64 {
+        size_of::<MsixCap>() as u64
     }

-    fn read(&self, offset: usize, size: u8) -> Result<u64, mem::Error> {
+    fn read(&self, offset: u64, size: u8) -> Result<u64, mem::Error> {
         let cap = self.cap.read();
         Mmio::read(&*cap, offset, size)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<(), mem::Error> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<(), mem::Error> {
         if offset == 2 && size == 2 {
             let mut cap = self.cap.write();
             let control = MsixMsgCtrl(val as u16);
@@ -292,16 +292,16 @@ impl<F> Mmio for MsixTableMmio<F>
 where
     F: IrqFd,
 {
-    fn size(&self) -> usize {
-        size_of::<MsixTableEntry>() * self.entries.len()
+    fn size(&self) -> u64 {
+        (size_of::<MsixTableEntry>() * self.entries.len()) as u64
     }

-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
         if size != 4 || offset & 0b11 != 0 {
             log::error!("unaligned access to msix table: size = {size}, offset = {offset:#x}");
             return Ok(0);
         }
-        let index = offset / size_of::<MsixTableEntry>();
+        let index = offset as usize / size_of::<MsixTableEntry>();
         let Some(entry) = self.entries.get(index) else {
             log::error!(
                 "MSI-X table size: {}, accessing index {index}",
@@ -310,7 +310,7 @@ where
             return Ok(0);
         };
         let entry = entry.read();
-        let ret = match offset % size_of::<MsixTableEntry>() {
+        let ret = match offset as usize % size_of::<MsixTableEntry>() {
             0 => entry.get_addr_lo(),
             4 => entry.get_addr_hi(),
             8 => entry.get_data(),
@@ -320,13 +320,13 @@ where
         Ok(ret as u64)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
         if size != 4 || offset & 0b11 != 0 {
             log::error!("unaligned access to msix table: size = {size}, offset = {offset:#x}");
             return Ok(());
         }
         let val = val as u32;
-        let index = offset / size_of::<MsixTableEntry>();
+        let index = offset as usize / size_of::<MsixTableEntry>();
         let Some(entry) = self.entries.get(index) else {
             log::error!(
                 "MSI-X table size: {}, accessing index {index}",
@@ -335,7 +335,7 @@ where
             return Ok(());
         };
         let mut entry = entry.write();
-        match offset % size_of::<MsixTableEntry>() {
+        match offset as usize % size_of::<MsixTableEntry>() {
             0 => entry.set_addr_lo(val)?,
             4 => entry.set_addr_hi(val)?,
             8 => entry.set_data(val)?,
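MsixTableMmio now keeps the incoming offset as u64 and converts to usize only at the indexing boundary. A sketch of the same index/field split, assuming the 16-byte MSI-X table entry size mandated by the PCI spec:

const ENTRY_SIZE: u64 = 16; // size_of::<MsixTableEntry>(): addr_lo, addr_hi, data, control

// Entry index and field offset of an MSI-X table access: u64 in, usize
// only where the entries slice is indexed, as in the diff above.
fn decode(offset: u64) -> (usize, u64) {
    ((offset / ENTRY_SIZE) as usize, offset % ENTRY_SIZE)
}

fn main() {
    assert_eq!(decode(0x00), (0, 0)); // entry 0, addr_lo
    assert_eq!(decode(0x14), (1, 4)); // entry 1, addr_hi
    assert_eq!(decode(0x28), (2, 8)); // entry 2, data
}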
@@ -142,9 +142,9 @@ impl ChangeLayout for UpdateCommandCallback {
             if !self.changed.contains(Command::MEM) {
                 continue;
             }
-            let mut addr = (bar & !BAR_MEM_MASK) as usize;
+            let mut addr = (bar & !BAR_MEM_MASK) as u64;
             if matches!(pci_bar, PciBar::Mem64(_)) {
-                addr |= (self.bars[i + 1] as usize) << 32;
+                addr |= (self.bars[i + 1] as u64) << 32;
             }
             if self.current.contains(Command::MEM) {
                 memory.add_region(AddrOpt::Fixed(addr), region.clone())?;
@@ -192,8 +192,8 @@ impl ChangeLayout for MoveBarCallback {
         } else {
             let src_addr = self.src & !(BAR_MEM_MASK as u64);
             let dst_addr = self.dst & !(BAR_MEM_MASK as u64);
-            let region = memory.remove_region(src_addr as usize)?;
-            memory.add_region(AddrOpt::Fixed(dst_addr as usize), region)?;
+            let region = memory.remove_region(src_addr)?;
+            memory.add_region(AddrOpt::Fixed(dst_addr), region)?;
         }
         Ok(())
     }
@@ -231,12 +231,13 @@ impl HeaderData {

     fn write_header(
         &mut self,
-        offset: usize,
+        offset: u64,
         size: u8,
         val: u64,
         pci_bars: &[PciBar; 6],
     ) -> Option<Box<dyn ChangeLayout>> {
         let bdf = self.bdf;
+        let offset = offset as usize;
         match &mut self.header {
             ConfigHeader::Device(header) => match (offset, size as usize) {
                 CommonHeader::LAYOUT_COMMAND => {
@@ -351,11 +352,12 @@ impl EmulatedHeader {
 }

 impl Mmio for EmulatedHeader {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         0x40
     }

-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
+        let offset = offset as usize;
         let data = self.data.read();
         let bytes = match &data.header {
             ConfigHeader::Device(header) => AsBytes::as_bytes(header),
@@ -370,7 +372,7 @@ impl Mmio for EmulatedHeader {
         Ok(ret.unwrap_or(0))
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
         let mut data = self.data.write();
         if let Some(callback) = data.write_header(offset, size, val, &self.bars) {
             Err(mem::Error::Action(mem::Action::ChangeLayout { callback }))
@@ -391,23 +393,23 @@ pub struct EmulatedConfig {
 }

 impl Mmio for EmulatedConfig {
-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
-        if offset < size_of::<DeviceHeader>() {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
+        if offset < size_of::<DeviceHeader>() as u64 {
             self.header.read(offset, size)
         } else {
             self.caps.read(offset, size)
         }
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
-        if offset < size_of::<DeviceHeader>() {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
+        if offset < size_of::<DeviceHeader>() as u64 {
             self.header.write(offset, size, val)
         } else {
             self.caps.write(offset, size, val)
         }
     }

-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         4096
     }
 }
@@ -93,7 +93,7 @@ struct BarCallback {
 }

 impl MemRegionCallback for BarCallback {
-    fn mapped(&self, addr: usize) -> mem::Result<()> {
+    fn mapped(&self, addr: u64) -> mem::Result<()> {
         self.header
             .write()
             .set_bar(self.index as usize, addr as u32);
@@ -40,12 +40,12 @@ impl PciSegment {
 }

 impl Mmio for PciSegment {
-    fn size(&self) -> usize {
+    fn size(&self) -> u64 {
         // 256 MiB: 256 buses, 32 devices, 8 functions
         256 * 32 * 8 * 4096
     }

-    fn read(&self, offset: usize, size: u8) -> Result<u64, mem::Error> {
+    fn read(&self, offset: u64, size: u8) -> Result<u64, mem::Error> {
         let bdf = Bdf((offset >> 12) as u16);
         let configs = self.configs.read();
         if let Some(config) = configs.get(&bdf) {
@@ -55,7 +55,7 @@ impl Mmio for PciSegment {
         }
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> Result<(), mem::Error> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> Result<(), mem::Error> {
         let bdf = Bdf((offset >> 12) as u16);
         let configs = self.configs.read();
         if let Some(config) = configs.get(&bdf) {
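PciSegment routes a config access by taking `offset >> 12` as the BDF, since ECAM gives every function its own 4 KiB configuration page. A sketch of that dispatch with arbitrary sample encodings:

// Each PCI function owns one 4 KiB ECAM page, so the BDF is the page index.
fn bdf_of(offset: u64) -> u16 {
    (offset >> 12) as u16
}

fn main() {
    // BDF encoding: bus[15:8], device[7:3], function[2:0].
    assert_eq!(bdf_of(3 << 15), 0x0018); // bus 0, device 3, function 0
    assert_eq!(bdf_of((1 << 20) | (1 << 12)), 0x0101); // bus 1, device 0, function 1
}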
@@ -14,7 +14,6 @@

 use std::fmt::Debug;
 use std::fs::{File, OpenOptions};
-use std::mem::size_of;
 use std::os::unix::prelude::OpenOptionsExt;
 use std::sync::Arc;

@@ -35,15 +34,15 @@ use crate::virtio::{IrqSender, Result, FEATURE_BUILT_IN};
 pub struct EntropyConfig;

 impl Mmio for EntropyConfig {
-    fn size(&self) -> usize {
-        size_of::<Self>()
+    fn size(&self) -> u64 {
+        0
     }

-    fn read(&self, _offset: usize, _size: u8) -> mem::Result<u64> {
+    fn read(&self, _offset: u64, _size: u8) -> mem::Result<u64> {
         Ok(0)
     }

-    fn write(&self, _offset: usize, _size: u8, _val: u64) -> mem::Result<()> {
+    fn write(&self, _offset: u64, _size: u8, _val: u64) -> mem::Result<()> {
         Ok(())
     }
 }
@@ -126,9 +126,9 @@ impl Virtio for VhostVsock {
         let mem = memory.lock_layout();
         for (index, (gpa, user_mem)) in mem.iter().enumerate() {
             table.num += 1;
-            table.regions[index].gpa = gpa as u64;
+            table.regions[index].gpa = gpa;
             table.regions[index].hva = user_mem.pages.addr() as u64;
-            table.regions[index].size = user_mem.pages.size() as u64;
+            table.regions[index].size = user_mem.pages.size();
         }
         self.vhost_dev.set_mem_table(&table)?;
         for (index, (queue, error_fd)) in
@@ -160,9 +160,9 @@ impl Virtio for VhostVsock {
         let virtq_addr = VirtqAddr {
             index,
             flags: 0,
-            desc_hva: mem.translate(queue.desc.load(Ordering::Acquire) as usize)? as _,
-            used_hva: mem.translate(queue.device.load(Ordering::Acquire) as usize)? as _,
-            avail_hva: mem.translate(queue.driver.load(Ordering::Acquire) as usize)? as _,
+            desc_hva: mem.translate(queue.desc.load(Ordering::Acquire))? as _,
+            used_hva: mem.translate(queue.device.load(Ordering::Acquire))? as _,
+            avail_hva: mem.translate(queue.driver.load(Ordering::Acquire))? as _,
             log_guest_addr: 0,
         };
         self.vhost_dev.set_virtq_addr(&virtq_addr)?;
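With gpa already u64 and pages.size() returning u64, only the host virtual address in the vhost memory table still needs a cast. A minimal sketch of that table fill; VhostMemRegion here is a stand-in for the real vhost type, not alioth's definition:

#[derive(Default, Clone, Copy)]
struct VhostMemRegion {
    gpa: u64,  // guest-physical base, u64 end to end
    hva: u64,  // host virtual address, cast from a pointer
    size: u64, // region size
}

// Build the region table from (gpa, host pointer, size) triples.
fn fill_table(layout: &[(u64, *const u8, u64)]) -> Vec<VhostMemRegion> {
    layout
        .iter()
        .map(|&(gpa, hva, size)| VhostMemRegion {
            gpa,
            hva: hva as u64,
            size,
        })
        .collect()
}

fn main() {
    let page = [0u8; 4096];
    let table = fill_table(&[(0x1_0000_0000, page.as_ptr(), 4096)]);
    assert_eq!(table[0].gpa, 0x1_0000_0000);
    assert_eq!(table[0].size, 4096);
    assert_ne!(table[0].hva, 0);
}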
@@ -242,13 +242,13 @@ impl<M> Mmio for VirtioPciRegisterMmio<M>
 where
     M: MsiSender,
 {
-    fn size(&self) -> usize {
-        size_of::<VirtioPciRegister>() + size_of::<u32>() * self.queues.len()
+    fn size(&self) -> u64 {
+        (size_of::<VirtioPciRegister>() + size_of::<u32>() * self.queues.len()) as u64
     }

-    fn read(&self, offset: usize, size: u8) -> mem::Result<u64> {
+    fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
         let reg = &*self.reg;
-        let ret = match (offset, size as usize) {
+        let ret = match (offset as usize, size as usize) {
             VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {
                 reg.device_feature_sel.load(Ordering::Acquire) as u64
             }
@@ -370,9 +370,9 @@ where
         Ok(ret)
     }

-    fn write(&self, offset: usize, size: u8, val: u64) -> mem::Result<()> {
+    fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<()> {
         let reg = &*self.reg;
-        match (offset, size as usize) {
+        match (offset as usize, size as usize) {
             VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {
                 reg.device_feature_sel.store(val as u8, Ordering::Release);
             }
@@ -531,10 +531,10 @@ impl<R> MemRegionCallback for IoeventFdCallback<R>
 where
     R: IoeventFdRegistry,
 {
-    fn mapped(&self, addr: usize) -> mem::Result<()> {
+    fn mapped(&self, addr: u64) -> mem::Result<()> {
         for (q_index, fd) in self.ioeventfds.iter().enumerate() {
-            let base_addr = addr + (12 << 10) + VirtioPciRegister::OFFSET_QUEUE_NOTIFY;
-            let notify_addr = base_addr + q_index * size_of::<u32>();
+            let base_addr = addr + (12 << 10) + VirtioPciRegister::OFFSET_QUEUE_NOTIFY as u64;
+            let notify_addr = base_addr + (q_index * size_of::<u32>()) as u64;
             self.registry.register(fd, notify_addr, 0, None)?;
             log::info!("q-{q_index} ioeventfd registered at {notify_addr:x}",)
         }
@@ -818,7 +818,7 @@ where
         entries: msix_entries,
     })));
     bar0.ranges
-        .push(MemRange::Span((12 << 10) - msix_table_size));
+        .push(MemRange::Span((12 << 10) - msix_table_size as u64));
     bar0.ranges.push(MemRange::Emulated(registers.clone()));
     bar0.callbacks.lock().push(Box::new(IoeventFdCallback {
         registry: ioeventfd_reg,
@@ -829,14 +829,14 @@ where
     }
     let mut bars = PciBar::empty_6();
     let mut bar_masks = [0; 6];
-    let bar0_mask = !((bar0.size as u64).next_power_of_two() - 1);
+    let bar0_mask = !(bar0.size.next_power_of_two() - 1);
     bar_masks[0] = bar0_mask as u32;
     bar_masks[1] = (bar0_mask >> 32) as u32;
     bars[0] = PciBar::Mem64(Arc::new(bar0));
     header.bars[0] = BAR_MEM64;

     if let Some(region) = &dev.shared_mem_regions {
-        let bar2_mask = !((region.size as u64).next_power_of_two() - 1);
+        let bar2_mask = !(region.size.next_power_of_two() - 1);
         bar_masks[2] = bar2_mask as u32;
         bar_masks[3] = (bar2_mask >> 32) as u32;
         bars[2] = PciBar::Mem64(region.clone());
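IoeventFdCallback::mapped derives one doorbell GPA per queue from the u64 BAR0 address. A sketch of that arithmetic; the OFFSET_QUEUE_NOTIFY value below is a placeholder, not alioth's real register offset:

use std::mem::size_of;

// Placeholder for VirtioPciRegister::OFFSET_QUEUE_NOTIFY; the real value
// comes from alioth's register layout.
const OFFSET_QUEUE_NOTIFY: u64 = 0;

// Doorbell GPA of queue q once BAR0 is mapped at bar0_gpa: the notify
// registers sit 12 KiB into the BAR, one u32 per queue.
fn notify_addr(bar0_gpa: u64, q_index: u64) -> u64 {
    bar0_gpa + (12 << 10) + OFFSET_QUEUE_NOTIFY + q_index * size_of::<u32>() as u64
}

fn main() {
    let base = 0x1_0000_0000u64; // assume BAR0 mapped at 4 GiB
    assert_eq!(notify_addr(base, 0), 0x1_0000_3000);
    assert_eq!(notify_addr(base, 2), 0x1_0000_3008);
}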
@@ -158,29 +158,29 @@ impl<'g, 'm> SplitLayout<'g, 'm> {

     fn get_indirect(
         &self,
-        addr: usize,
-        readable: &mut Vec<(usize, usize)>,
-        writeable: &mut Vec<(usize, usize)>,
+        addr: u64,
+        readable: &mut Vec<(u64, u64)>,
+        writeable: &mut Vec<(u64, u64)>,
     ) -> Result<()> {
         let mut id = 0;
         loop {
-            let desc: Desc = self.guard.read(addr + id * size_of::<Desc>())?;
+            let desc: Desc = self.guard.read(addr + id * size_of::<Desc>() as u64)?;
             let flag = DescFlag::from_bits_retain(desc.flag);
             assert!(!flag.contains(DescFlag::INDIRECT));
             if flag.contains(DescFlag::WRITE) {
-                writeable.push((desc.addr as usize, desc.len as usize));
+                writeable.push((desc.addr, desc.len as u64));
             } else {
-                readable.push((desc.addr as usize, desc.len as usize));
+                readable.push((desc.addr, desc.len as u64));
             }
             if flag.contains(DescFlag::NEXT) {
-                id = desc.next as usize;
+                id = desc.next as u64;
             } else {
                 return Ok(());
             }
         }
     }

-    pub fn get_desc_iov(&self, mut id: u16) -> Result<(Vec<(usize, usize)>, Vec<(usize, usize)>)> {
+    pub fn get_desc_iov(&self, mut id: u16) -> Result<(Vec<(u64, u64)>, Vec<(u64, u64)>)> {
         let mut readable = Vec::new();
         let mut writeable = Vec::new();
         loop {
@@ -188,11 +188,11 @@ impl<'g, 'm> SplitLayout<'g, 'm> {
             let flag = DescFlag::from_bits_retain(desc.flag);
             if flag.contains(DescFlag::INDIRECT) {
                 assert_eq!(desc.len & 0xf, 0);
-                self.get_indirect(desc.addr as usize, &mut readable, &mut writeable)?;
+                self.get_indirect(desc.addr, &mut readable, &mut writeable)?;
             } else if flag.contains(DescFlag::WRITE) {
-                writeable.push((desc.addr as usize, desc.len as usize));
+                writeable.push((desc.addr, desc.len as u64));
             } else {
-                readable.push((desc.addr as usize, desc.len as usize));
+                readable.push((desc.addr, desc.len as u64));
             }
             if flag.contains(DescFlag::NEXT) {
                 id = desc.next;
@@ -276,35 +276,31 @@ impl<'m, 'q> QueueGuard for SplitQueueGuard<'m, 'q> {
     fn queue(&self) -> Result<impl LockedQueue> {
         let mut avail_event = None;
         let mut used_event = None;
-        let queue_size = self.register.size as usize;
+        let queue_size = self.register.size as u64;
         if self.register.feature.contains(VirtioFeature::EVENT_IDX) {
-            let avail_event_gpa = self.register.used as usize
-                + size_of::<UsedHeader>()
-                + queue_size * size_of::<UsedElem>();
+            let avail_event_gpa = self.register.used
+                + size_of::<UsedHeader>() as u64
+                + queue_size * size_of::<UsedElem>() as u64;
             avail_event = Some(self.guard.get_ref(avail_event_gpa)?);
-            let used_event_gpa = self.register.avail as usize
-                + size_of::<AvailHeader>()
-                + queue_size * size_of::<u16>();
+            let used_event_gpa = self.register.avail
+                + size_of::<AvailHeader>() as u64
+                + queue_size * size_of::<u16>() as u64;
             used_event = Some(self.guard.get_ref(used_event_gpa)?);
         }
-        let used = self
-            .guard
-            .get_ref::<UsedHeader>(self.register.used as usize)?;
+        let used = self.guard.get_ref::<UsedHeader>(self.register.used)?;
         let used_index = unsafe { &*used.get() }.idx;
-        let avail_ring_gpa = self.register.avail as usize + size_of::<AvailHeader>();
-        let used_ring_gpa = self.register.used as usize + size_of::<UsedHeader>();
+        let avail_ring_gpa = self.register.avail + size_of::<AvailHeader>() as u64;
+        let used_ring_gpa = self.register.used + size_of::<UsedHeader>() as u64;
         Ok(SplitLayout {
             guard: &self.guard,
-            avail: self.guard.get_ref(self.register.avail as usize)?,
+            avail: self.guard.get_ref(self.register.avail)?,
             avail_ring: self.guard.get_slice(avail_ring_gpa, queue_size)?,
             used_event,
             used,
             used_index,
             used_ring: self.guard.get_slice(used_ring_gpa, queue_size)?,
             avail_event,
-            desc: self
-                .guard
-                .get_slice(self.register.desc as usize, queue_size)?,
+            desc: self.guard.get_slice(self.register.desc, queue_size)?,
         })
     }
 }
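The split-ring arithmetic in SplitQueueGuard::queue is all u64 once the avail/used base addresses are. A sketch of the event-index GPA computation, with stand-in header sizes (4-byte avail/used headers and 8-byte used elements, per the virtio split-ring layout assumed here):

use std::mem::size_of;

// Stand-ins for the private ring types: 2-byte flags + 2-byte idx headers,
// and 8-byte (id u32 + len u32) used elements.
const AVAIL_HDR: u64 = 4;
const USED_HDR: u64 = 4;
const USED_ELEM: u64 = 8;

// GPAs of the ring bodies and the EVENT_IDX words, mirroring the
// computation in the diff above.
fn ring_gpas(avail: u64, used: u64, queue_size: u64) -> (u64, u64, u64, u64) {
    let avail_ring = avail + AVAIL_HDR;
    let used_ring = used + USED_HDR;
    let used_event = avail + AVAIL_HDR + queue_size * size_of::<u16>() as u64;
    let avail_event = used + USED_HDR + queue_size * USED_ELEM;
    (avail_ring, used_ring, used_event, avail_event)
}

fn main() {
    let (avail_ring, used_ring, used_event, avail_event) = ring_gpas(0x1000, 0x2000, 256);
    assert_eq!(avail_ring, 0x1004);
    assert_eq!(used_ring, 0x2004);
    assert_eq!(used_event, 0x1004 + 256 * 2);
    assert_eq!(avail_event, 0x2004 + 256 * 8);
}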