io_uring: allocate iovecs based on returned count from the kernel

The kernel may not only limit the number of requested entries, it can
also return more than were requested. If a non-power-of-2 count is
requested, it is rounded up to the next power of 2.

Ensure that there are enough iovecs by allocating them based on the
number returned from io_uring_setup instead of the number of entries
requested.

TEST=cargo test read_parallel

Change-Id: If92e0a31858d1af53b99af8415002a4f26b48230
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2140915
Reviewed-by: Chirantan Ekbote <chirantan@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Tested-by: Dylan Reid <dgreid@chromium.org>
Commit-Queue: Dylan Reid <dgreid@chromium.org>
Dylan Reid 2020-04-08 00:47:45 +00:00 committed by Commit Bot
parent 23a2b7b8a2
commit 03164ca9d8

@@ -125,6 +125,7 @@ impl URingContext {
&ring_params,
);
let num_sqe = ring_params.sq_entries as usize;
let submit_queue_entries = SubmitQueueEntries {
mmap: MemoryMapping::from_fd_offset_populate(
&ring_file,
@@ -132,7 +133,7 @@ impl URingContext {
u64::from(IORING_OFF_SQES),
)
.map_err(Error::MappingSubmitEntries)?,
len: ring_params.sq_entries as usize,
len: num_sqe,
};
let complete_ring = CompleteQueueState::new(
@@ -156,7 +157,7 @@ impl URingContext {
iov_base: null_mut(),
iov_len: 0
};
num_entries
num_sqe
],
added: 0,
in_flight: 0,
@@ -699,6 +700,40 @@ mod tests {
f
}
#[test]
// Queue as many reads as possible and then collect the completions.
fn read_parallel() {
let temp_dir = TempDir::new().unwrap();
const QUEUE_SIZE: usize = 10;
const BUF_SIZE: usize = 0x1000;
let mut uring = URingContext::new(QUEUE_SIZE).unwrap();
let mut buf = [0u8; BUF_SIZE * QUEUE_SIZE];
let f = create_test_file(&temp_dir, (BUF_SIZE * QUEUE_SIZE) as u64);
// Check that the whole file can be read and that the queue's wrapping is handled by reading
// many times the queue depth of buffers.
for i in 0..QUEUE_SIZE * 64 {
let index = i as u64;
unsafe {
let offset = (i % QUEUE_SIZE) * BUF_SIZE;
match uring.add_read(
buf[offset..].as_mut_ptr(),
BUF_SIZE,
f.as_raw_fd(),
offset as u64,
index,
) {
Ok(_) => (),
Err(Error::NoSpace) => {
let _ = uring.wait().unwrap().next().unwrap();
}
Err(_) => panic!("unexpected error from uring wait"),
}
}
}
}
#[test]
fn read_readv() {
let temp_dir = TempDir::new().unwrap();
@@ -708,7 +743,7 @@ mod tests {
let mut buf = [0u8; 0x1000];
let f = create_test_file(&temp_dir, 0x1000 * 2);
// check that the whoe file can be read and that the queues wrapping is handled by reading
// check that the whole file can be read and that the queues wrapping is handled by reading
// double the queue depth of buffers.
for i in 0..queue_size * 2 {
let index = i as u64;