// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Integration tests for [PageHandler]. These are more than unit tests since [PageHandler]
//! relies on the userfaultfd(2) kernel feature.

#![cfg(all(unix, feature = "enable"))]
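
// Test helpers such as create_uffd_for_test() and create_shared_memory() are provided by the
// `common` module below.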
mod common;

use std::array;
use std::ops::Range;
use std::thread;
use std::time;

use base::pagesize;
use base::MappedRegion;
use base::MemoryMappingBuilder;
use base::SharedMemory;
use common::*;
use swap::page_handler::Error;
use swap::page_handler::PageHandler;
use swap::userfaultfd::register_regions;
use swap::userfaultfd::unregister_regions;
use swap::worker::Worker;

const HUGEPAGE_SIZE: usize = 2 * 1024 * 1024; // 2MB

#[test]
fn create_success() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = create_shared_memory("shm", 6 * pagesize());
    let base_addr = shm.base_addr();

    let result = PageHandler::create(
        &file,
        &staging_shmem,
        &[
            base_addr..(base_addr + 3 * pagesize()),
            (base_addr + 3 * pagesize())..(base_addr + 6 * pagesize()),
        ],
        worker.channel.clone(),
    );

    assert!(result.is_ok());
    worker.close();
}

#[test]
fn create_partially_overlap() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();

    for range in [
        // the same address range
        base_addr..(base_addr + 3 * pagesize()),
        // left of the existing region overlaps
        (base_addr - pagesize())..(base_addr + pagesize()),
        // new region is inside
        (base_addr + pagesize())..(base_addr + 2 * pagesize()),
        // right of the existing region overlaps
        (base_addr + 2 * pagesize())..(base_addr + 4 * pagesize()),
        // new region covers the whole existing region
        (base_addr - pagesize())..(base_addr + 4 * pagesize()),
    ] {
        let result = PageHandler::create(
            &file,
            &staging_shmem,
            &[base_addr..(base_addr + 3 * pagesize()), range],
            worker.channel.clone(),
        );
        assert!(result.is_err());
        match result {
            Err(Error::RegionOverlap(_, _)) => {}
            _ => {
                unreachable!("not a region overlap error")
            }
        }
    }
    worker.close();
}

#[test]
fn create_invalid_range() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = create_shared_memory("shm", 6 * pagesize());
    let base_addr = shm.base_addr();

    let result = PageHandler::create(
        &file,
        &staging_shmem,
        &[base_addr..(base_addr - pagesize())],
        worker.channel.clone(),
    );

    assert!(result.is_err());
    worker.close();
}
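
/// Joins `join_handle`, polling `is_finished()` roughly once per millisecond, and panics if the
/// thread has not finished within `timeout_millis` milliseconds.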
fn wait_thread_with_timeout<T>(join_handle: thread::JoinHandle<T>, timeout_millis: u64) -> T {
    for _ in 0..timeout_millis {
        if join_handle.is_finished() {
            return join_handle.join().unwrap();
        }
        thread::sleep(time::Duration::from_millis(1));
    }
    panic!("thread join timeout");
}

#[test]
fn handle_page_fault_zero_success() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let uffd = create_uffd_for_test();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    page_handler.handle_page_fault(&uffd, base_addr).unwrap();
    page_handler
        .handle_page_fault(&uffd, base_addr + pagesize() + 1)
        .unwrap();
    page_handler
        .handle_page_fault(&uffd, base_addr + 3 * pagesize() - 1)
        .unwrap();

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..(3 * pagesize()) {
            let ptr = shm.mmap.as_ptr() as usize + i;
            unsafe {
                result.push(*(ptr as *mut u8));
            }
        }
        result
    });

    let result = wait_thread_with_timeout(join_handle, 100);

    assert_eq!(result, vec![0; 3 * pagesize()]);
    worker.close();
}

#[test]
fn handle_page_fault_invalid_address() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let uffd = create_uffd_for_test();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    assert!(page_handler
        .handle_page_fault(&uffd, base_addr - 1)
        .is_err());
    assert!(page_handler
        .handle_page_fault(&uffd, base_addr + 3 * pagesize())
        .is_err());
    worker.close();
}

#[test]
fn handle_page_fault_duplicated_page_fault() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let uffd = create_uffd_for_test();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    assert!(page_handler.handle_page_fault(&uffd, base_addr).is_ok());
    assert!(page_handler.handle_page_fault(&uffd, base_addr + 1).is_ok());
    worker.close();
}

#[test]
fn handle_page_remove_success() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let uffd = create_uffd_for_test();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    // fill the first page with zero
    page_handler.handle_page_fault(&uffd, base_addr).unwrap();
    // write value on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let ptr = base_addr as *mut u8;
        unsafe {
            *ptr = 1;
        }
    });
    wait_thread_with_timeout(join_handle, 100);
    let second_page_addr = base_addr + pagesize();
    page_handler
        .handle_page_remove(base_addr, second_page_addr)
        .unwrap();
    unsafe {
        libc::madvise(
            base_addr as *mut libc::c_void,
            pagesize(),
            libc::MADV_REMOVE,
        );
    }
    // fill the first page with zero again
    page_handler.handle_page_fault(&uffd, base_addr).unwrap();
    // read value on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let ptr = base_addr as *mut u8;
        unsafe { *ptr }
    });

    assert_eq!(wait_thread_with_timeout(join_handle, 100), 0);
    worker.close();
}

#[test]
fn handle_page_remove_invalid_address() {
    let worker = Worker::new(2, 2);
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let uffd = create_uffd_for_test();
    let shm = create_shared_memory("shm", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    page_handler.handle_page_fault(&uffd, base_addr).unwrap();
    page_handler
        .handle_page_fault(&uffd, base_addr + pagesize())
        .unwrap();
    page_handler
        .handle_page_fault(&uffd, base_addr + 2 * pagesize())
        .unwrap();
    assert!(page_handler
        .handle_page_remove(base_addr - 1, base_addr + 3 * pagesize())
        .is_err());
    assert!(page_handler
        .handle_page_remove(base_addr, base_addr + 3 * pagesize() + 1)
        .is_err());
    // removing the whole region should succeed.
    assert!(page_handler
        .handle_page_remove(base_addr, base_addr + 3 * pagesize())
        .is_ok());
    worker.close();
}

#[test]
fn move_to_staging_data_written_before_enabling() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .offset(3 * pagesize() as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;

    let regions = [
        base_addr1..(base_addr1 + 3 * pagesize()),
        base_addr2..(base_addr2 + 3 * pagesize()),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    // write data before registering to userfaultfd
    unsafe {
        for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() {
            *(i as *mut u8) = 1;
        }
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 2;
        }
        for i in base_addr2 + 2 * pagesize()..base_addr2 + 3 * pagesize() {
            *(i as *mut u8) = 3;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    // page faults on all pages.
    for i in 0..3 {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
        page_handler
            .handle_page_fault(&uffd, base_addr2 + i * pagesize())
            .unwrap();
    }

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr1 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr2 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    let values: Vec<u8> = vec![0, 1, 0, 0, 2, 3];
    for (i, v) in values.iter().enumerate() {
        for j in 0..pagesize() {
            assert_eq!(&result[i * pagesize() + j], v);
        }
    }
    worker.close();
}
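
/// Converts an address range into the corresponding range of page indexes (address / page size).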
fn page_idx_range(start_addr: usize, end_addr: usize) -> Range<usize> {
    (start_addr / pagesize())..(end_addr / pagesize())
}
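
/// Converts a page index back to the address of the first byte of that page.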
fn page_idx_to_addr(page_idx: usize) -> usize {
    page_idx * pagesize()
}

#[test]
fn move_to_staging_hugepage_chunks() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem =
        SharedMemory::new("test staging memory", 10 * HUGEPAGE_SIZE as u64).unwrap();
    let shm = SharedMemory::new("shm", 10 * HUGEPAGE_SIZE as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(5 * HUGEPAGE_SIZE)
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(5 * HUGEPAGE_SIZE)
        .from_shared_memory(&shm)
        .offset(5 * HUGEPAGE_SIZE as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;

    let regions = [
        base_addr1..(base_addr1 + 5 * HUGEPAGE_SIZE),
        base_addr2..(base_addr2 + 5 * HUGEPAGE_SIZE),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    // write data before registering to userfaultfd
    unsafe {
        for i in page_idx_range(base_addr1 + pagesize(), base_addr1 + 3 * pagesize()) {
            *(page_idx_to_addr(i) as *mut u8) = 1;
        }
        for i in page_idx_range(
            base_addr1 + HUGEPAGE_SIZE - pagesize(),
            base_addr1 + HUGEPAGE_SIZE + pagesize(),
        ) {
            *(page_idx_to_addr(i) as *mut u8) = 2;
        }
        for i in page_idx_range(
            base_addr1 + 2 * HUGEPAGE_SIZE + pagesize(),
            base_addr1 + 3 * HUGEPAGE_SIZE + pagesize(),
        ) {
            *(page_idx_to_addr(i) as *mut u8) = 3;
        }
        for i in page_idx_range(base_addr2 + HUGEPAGE_SIZE, base_addr2 + 2 * HUGEPAGE_SIZE) {
            *(page_idx_to_addr(i) as *mut u8) = 4;
        }
        for i in page_idx_range(
            base_addr2 + 2 * HUGEPAGE_SIZE + pagesize(),
            base_addr2 + 5 * HUGEPAGE_SIZE - pagesize(),
        ) {
            *(page_idx_to_addr(i) as *mut u8) = 5;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 5 * HUGEPAGE_SIZE as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    // page faults on all pages.
    for i in 0..5 * HUGEPAGE_SIZE / pagesize() {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
        page_handler
            .handle_page_fault(&uffd, base_addr2 + i * pagesize())
            .unwrap();
    }

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in page_idx_range(base_addr1, base_addr1 + 5 * HUGEPAGE_SIZE) {
            let ptr = (page_idx_to_addr(i)) as *mut u8;
            unsafe {
                result.push(*ptr);
            }
        }
        for i in page_idx_range(base_addr2, base_addr2 + 5 * HUGEPAGE_SIZE) {
            let ptr = (page_idx_to_addr(i)) as *mut u8;
            unsafe {
                result.push(*ptr);
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    assert_eq!(result[0], 0);
    assert_eq!(result[1], 1);
    assert_eq!(result[2], 1);
    for i in page_idx_range(3 * pagesize(), HUGEPAGE_SIZE - pagesize()) {
        assert_eq!(result[i], 0);
    }
    for i in page_idx_range(HUGEPAGE_SIZE - pagesize(), HUGEPAGE_SIZE + pagesize()) {
        assert_eq!(result[i], 2);
    }
    for i in page_idx_range(HUGEPAGE_SIZE + pagesize(), 2 * HUGEPAGE_SIZE + pagesize()) {
        assert_eq!(result[i], 0);
    }
    for i in page_idx_range(
        2 * HUGEPAGE_SIZE + pagesize(),
        3 * HUGEPAGE_SIZE + pagesize(),
    ) {
        assert_eq!(result[i], 3);
    }
    for i in page_idx_range(3 * HUGEPAGE_SIZE + pagesize(), 6 * HUGEPAGE_SIZE) {
        assert_eq!(result[i], 0);
    }
    for i in page_idx_range(6 * HUGEPAGE_SIZE, 7 * HUGEPAGE_SIZE) {
        assert_eq!(result[i], 4);
    }
    for i in page_idx_range(7 * HUGEPAGE_SIZE, 7 * HUGEPAGE_SIZE + pagesize()) {
        assert_eq!(result[i], 0);
    }
    for i in page_idx_range(
        7 * HUGEPAGE_SIZE + pagesize(),
        10 * HUGEPAGE_SIZE - pagesize(),
    ) {
        assert_eq!(result[i], 5);
    }
    for i in page_idx_range(10 * HUGEPAGE_SIZE - pagesize(), 10 * HUGEPAGE_SIZE) {
        assert_eq!(result[i], 0);
    }
    worker.close();
}

#[test]
fn move_to_staging_invalid_base_addr() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 3 * pagesize() as u64).unwrap();
    let shm = create_shared_memory("shm1", 3 * pagesize());
    let base_addr = shm.base_addr();
    let regions = [base_addr..(base_addr + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    // the base_addr is within the region
    assert!(
        unsafe { page_handler.move_to_staging(base_addr + pagesize(), &shm.shm, 0) }.is_err()
    );
    // the base_addr is outside of the region
    assert!(
        unsafe { page_handler.move_to_staging(base_addr - pagesize(), &shm.shm, 0) }.is_err()
    );
    worker.close();
}
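
/// Calls `swap_out()` with a 1 MiB budget per iteration until it reports that no more pages were
/// swapped out.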
fn swap_out_all(page_handler: &PageHandler) {
    while page_handler.swap_out(1024 * 1024).unwrap() != 0 {}
}

#[test]
fn swap_out_success() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .offset(3 * pagesize() as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;
    let regions = [
        base_addr1..(base_addr1 + 3 * pagesize()),
        base_addr2..(base_addr2 + 3 * pagesize()),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    // write data before registering to userfaultfd
    unsafe {
        for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() {
            *(i as *mut u8) = 1;
        }
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 2;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    swap_out_all(&page_handler);
    // page faults on all pages. page 1 will be swapped in from the file. page 0 and page 2 will
    // be filled with zero.
    for i in 0..3 {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
        page_handler
            .handle_page_fault(&uffd, base_addr2 + i * pagesize())
            .unwrap();
    }

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr1 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr2 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    let values: Vec<u8> = vec![0, 1, 0, 0, 2, 0];
    for (i, v) in values.iter().enumerate() {
        for j in 0..pagesize() {
            assert_eq!(&result[i * pagesize() + j], v);
        }
    }
    worker.close();
}

#[test]
fn swap_out_handled_page() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;

    let regions = [base_addr1..(base_addr1 + 3 * pagesize())];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    // write data before registering to userfaultfd
    unsafe {
        for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() {
            *(i as *mut u8) = 1;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
    }
    worker.channel.wait_complete();
    // page in before swap_out()
    page_handler
        .handle_page_fault(&uffd, base_addr1 + pagesize())
        .unwrap();
    swap_out_all(&page_handler);

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..pagesize() {
            let ptr = (base_addr1 + pagesize() + i) as *mut u8;
            unsafe {
                result.push(*ptr);
            }
        }
        result
    });
    // reading the page is not blocked.
    let result = wait_thread_with_timeout(join_handle, 100);
    for v in result {
        assert_eq!(v, 1);
    }
    worker.close();
}

#[test]
fn swap_out_twice() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .offset(3 * pagesize() as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;
    let regions = [
        base_addr1..(base_addr1 + 3 * pagesize()),
        base_addr2..(base_addr2 + 3 * pagesize()),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe {
        for i in 0..pagesize() {
            *((base_addr1 + i) as *mut u8) = 1;
            *((base_addr1 + 2 * pagesize() + i) as *mut u8) = 2;
            *((base_addr2 + i) as *mut u8) = 3;
            *((base_addr2 + 2 * pagesize() + i) as *mut u8) = 4;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    swap_out_all(&page_handler);
    // page faults on all pages in mmap1.
    for i in 0..3 {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
    }
    // write values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        for i in 0..pagesize() {
            let ptr = (base_addr1 + pagesize() + i) as *mut u8;
            unsafe {
                *ptr = 5;
            }
        }
        for i in 0..pagesize() {
            let ptr = (base_addr1 + 2 * pagesize() + i) as *mut u8;
            unsafe {
                *ptr = 6;
            }
        }
    });
    wait_thread_with_timeout(join_handle, 100);
    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    swap_out_all(&page_handler);

    // page faults on all pages.
    for i in 0..3 {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
        page_handler
            .handle_page_fault(&uffd, base_addr2 + i * pagesize())
            .unwrap();
    }
    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr1 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr2 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    let values: Vec<u8> = vec![1, 5, 6, 3, 0, 4];
    for (i, v) in values.iter().enumerate() {
        for j in 0..pagesize() {
            assert_eq!(&result[i * pagesize() + j], v);
        }
    }
    worker.close();
}

#[test]
fn swap_in_success() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .offset(3 * pagesize() as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;
    let regions = [
        base_addr1..(base_addr1 + 3 * pagesize()),
        base_addr2..(base_addr2 + 3 * pagesize()),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe {
        for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() {
            *(i as *mut u8) = 1;
        }
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 2;
        }
        for i in base_addr2 + 2 * pagesize()..base_addr2 + 3 * pagesize() {
            *(i as *mut u8) = 3;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    swap_out_all(&page_handler);
    page_handler
        .handle_page_fault(&uffd, base_addr1 + pagesize())
        .unwrap();
    page_handler
        .handle_page_fault(&uffd, base_addr2 + pagesize())
        .unwrap();
    unsafe {
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 4;
        }
    }
    // move to staging memory.
    unsafe {
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();
    let mut swap_in_ctx = page_handler.start_swap_in();
    while swap_in_ctx.swap_in(&uffd, 1024 * 1024).unwrap() != 0 {}
    unregister_regions(&regions, array::from_ref(&uffd)).unwrap();

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr1 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr2 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    let values: Vec<u8> = vec![0, 1, 0, 0, 4, 3];
    for (i, v) in values.iter().enumerate() {
        for j in 0..pagesize() {
            assert_eq!(&result[i * pagesize() + j], v);
        }
    }
    worker.close();
}

#[test]
fn trim_success() {
    let worker = Worker::new(2, 2);
    let uffd = create_uffd_for_test();
    let file = tempfile::tempfile().unwrap();
    let staging_shmem = SharedMemory::new("test staging memory", 6 * pagesize() as u64).unwrap();
    let shm = SharedMemory::new("shm", 6 * pagesize() as u64).unwrap();
    let mmap1 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .build()
        .unwrap();
    let mmap2 = MemoryMappingBuilder::new(3 * pagesize())
        .from_shared_memory(&shm)
        .offset(3 * pagesize() as u64)
        .build()
        .unwrap();
    let base_addr1 = mmap1.as_ptr() as usize;
    let base_addr2 = mmap2.as_ptr() as usize;
    let regions = [
        base_addr1..(base_addr1 + 3 * pagesize()),
        base_addr2..(base_addr2 + 3 * pagesize()),
    ];
    let page_handler =
        PageHandler::create(&file, &staging_shmem, &regions, worker.channel.clone()).unwrap();
    unsafe {
        for i in base_addr1..base_addr1 + pagesize() {
            *(i as *mut u8) = 0;
        }
        for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() {
            *(i as *mut u8) = 1;
        }
        for i in base_addr2..base_addr2 + pagesize() {
            *(i as *mut u8) = 0;
        }
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 2;
        }
        for i in base_addr2 + 2 * pagesize()..base_addr2 + 3 * pagesize() {
            *(i as *mut u8) = 3;
        }
    }
    unsafe { register_regions(&regions, array::from_ref(&uffd)) }.unwrap();

    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();

    let mut trim_ctx = page_handler.start_trim();

    assert_eq!(trim_ctx.trim_pages(6 * pagesize()).unwrap().unwrap(), 1);
    assert_eq!(trim_ctx.trimmed_clean_pages(), 0);
    assert_eq!(trim_ctx.trimmed_zero_pages(), 1);
    // 1 zero page
    assert_eq!(trim_ctx.trim_pages(6 * pagesize()).unwrap().unwrap(), 1);
    assert_eq!(trim_ctx.trimmed_clean_pages(), 0);
    assert_eq!(trim_ctx.trimmed_zero_pages(), 2);

    swap_out_all(&page_handler);
    for i in 0..3 {
        page_handler
            .handle_page_fault(&uffd, base_addr1 + i * pagesize())
            .unwrap();
        page_handler
            .handle_page_fault(&uffd, base_addr2 + i * pagesize())
            .unwrap();
    }
    unsafe {
        for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() {
            *(i as *mut u8) = 4;
        }
    }

    // move to staging memory.
    unsafe {
        page_handler.move_to_staging(base_addr1, &shm, 0).unwrap();
        page_handler
            .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64)
            .unwrap();
    }
    worker.channel.wait_complete();

    let mut trim_ctx = page_handler.start_trim();
    // 2 zero pages and 1 clean page
    assert_eq!(trim_ctx.trim_pages(6 * pagesize()).unwrap().unwrap(), 3);
    assert_eq!(trim_ctx.trimmed_clean_pages(), 1);
    assert_eq!(trim_ctx.trimmed_zero_pages(), 2);
    // 1 zero page and 1 clean page
    assert_eq!(trim_ctx.trim_pages(6 * pagesize()).unwrap().unwrap(), 2);
    assert_eq!(trim_ctx.trimmed_clean_pages(), 2);
    assert_eq!(trim_ctx.trimmed_zero_pages(), 3);
    assert!(trim_ctx.trim_pages(pagesize()).unwrap().is_none());

    let mut swap_in_ctx = page_handler.start_swap_in();
    while swap_in_ctx.swap_in(&uffd, 1024 * 1024).unwrap() != 0 {}
    unregister_regions(&regions, array::from_ref(&uffd)).unwrap();

    // read values on another thread to avoid blocking forever
    let join_handle = thread::spawn(move || {
        let mut result = Vec::new();
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr1 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        for i in 0..3 {
            for j in 0..pagesize() {
                let ptr = (base_addr2 + i * pagesize() + j) as *mut u8;
                unsafe {
                    result.push(*ptr);
                }
            }
        }
        result
    });
    let result = wait_thread_with_timeout(join_handle, 100);
    let values: Vec<u8> = vec![0, 1, 0, 0, 4, 3];
    for (i, v) in values.iter().enumerate() {
        for j in 0..pagesize() {
            assert_eq!(&result[i * pagesize() + j], v);
        }
    }
    worker.close();
}