crosvm/kvm/tests/dirty_log.rs
Noah Gold 9af97d72fa [base] update/clean mmap interfaces.
This CL addresses some minor issues with the existing interface:
1. from_descriptor is too generic for some platforms that require
   special handling for file/File backed mappings.
2. Nearly all call sites pass either File or SharedMemory. Now
   we just have from_ methods for those types to preserve type
   information.
3. Other platforms require additional fields in MemoryMapping, so a
   tuple struct no longer makes sense.
4. The mmap syscall error message was misleading as we use it for more
   than just the mmap syscall.

BUG=None
TEST=builds

Change-Id: I74c41bad52bb81880a11231cd18f47e233548a24
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2815614
Reviewed-by: Udam Saini <udam@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Noah Gold <nkgold@google.com>
2021-04-15 02:10:35 +00:00

77 lines
2.5 KiB
Rust

// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
// Dirty-page-log test: boots a one-vCPU VM running a 3-byte real-mode
// program that stores a single byte into guest memory and halts, then
// verifies (a) KVM's dirty bitmap flags exactly the touched page and
// (b) the written value is visible through a host mapping of the same
// shared memory.
#[test]
fn test_run() {
/*
0000 881C mov [si],bl
0002 F4 hlt
*/
// Assembled bytes of the program above: `mov [si], bl` then `hlt`.
let code = [0x88, 0x1c, 0xf4];
// 64 KiB of guest memory = 16 pages of 4 KiB (two bytes of dirty bitmap).
let mem_size = 0x10000;
// Guest-physical address where the code is loaded; RIP is pointed here below.
let load_addr = GuestAddress(0x1000);
// Empty GuestMemory — the test registers its own region explicitly via
// add_memory_region() so it can ask for dirty logging on that slot.
let guest_mem = GuestMemory::new(&[]).unwrap();
// Anonymous shared memory backing the guest RAM. It is mapped twice
// (here, and again inside add_memory_region below) so the host can
// inspect what the guest wrote.
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping");
// Copy the guest program into the backing memory at load_addr.
mmap.write_slice(&code[..], load_addr.offset() as usize)
.expect("Writing code to memory failed.");
let kvm = Kvm::new().expect("new kvm failed");
let mut vm = Vm::new(&kvm, guest_mem).expect("new vm failed");
let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
// Real-mode setup: flat CS with base/selector 0 so RIP is used as the
// physical address of the code.
let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
vcpu_sregs.cs.base = 0;
vcpu_sregs.cs.selector = 0;
vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
// SAFETY: kvm_regs is a plain-old-data struct of integer fields
// (bindgen-generated), so the all-zero bit pattern is a valid value.
let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
vcpu_regs.rip = load_addr.offset() as u64;
// Bit 1 of RFLAGS is architecturally reserved and must be set.
vcpu_regs.rflags = 2;
// Write 0x12 to the beginning of the 9th page.
// SI = 0x8000 (start of page index 8, i.e. the 9th page), BL = 0x12;
// the guest's `mov [si], bl` dirties exactly that page.
vcpu_regs.rsi = 0x8000;
vcpu_regs.rbx = 0x12;
vcpu.set_regs(&vcpu_regs).expect("set regs failed");
// Register a second mapping of the same shared memory as the guest's
// RAM at guest-physical 0. NOTE(review): the two booleans appear to be
// (read_only = false, log_dirty_pages = true) — confirm against
// Vm::add_memory_region's signature.
let slot = vm
.add_memory_region(
GuestAddress(0),
Box::new(
MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,
true,
)
.expect("failed to register memory");
let runnable_vcpu = vcpu.to_runnable(None).unwrap();
// Run until the guest executes HLT; any other exit reason is a failure.
loop {
match runnable_vcpu.run().expect("run failed") {
VcpuExit::Hlt => break,
r => panic!("unexpected exit reason: {:?}", r),
}
}
// Two bytes of bitmap cover the 16 pages of this slot, one bit per page.
let mut dirty_log = [0x0, 0x0];
vm.get_dirty_log(slot, &mut dirty_log[..])
.expect("failed to get dirty log");
// Tests the 9th page was written to.
// Page index 8 maps to byte 1, bit 0 of the bitmap.
assert_eq!(dirty_log[1], 0x1);
// Read back through the first host mapping of the same SharedMemory:
// only the low byte at RSI was written and anonymous shared memory is
// presumably zero-initialized, so the little-endian u64 equals RBX (0x12).
assert_eq!(
mmap.read_obj::<u64>(vcpu_regs.rsi as usize).unwrap(),
vcpu_regs.rbx
);
}