resources: use RangeInclusive to create allocator

Use RangeInclusive when constructing an AddressAllocator. A followup
change will add an iterator constructor, so using RangeInclusive makes
that API cleaner.

BUG=b:181736020
TEST=cargo test

Change-Id: I59c28b9cbd06c86482c0c57704fc2753a7886ac6
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3516666
Reviewed-by: Junichi Uekawa <uekawa@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: David Stevens <stevensd@chromium.org>
David Stevens 2022-03-10 13:58:48 +09:00 committed by Commit Bot
parent 097623dce4
commit 00ab01e920
3 changed files with 67 additions and 64 deletions
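
In short: the constructor's (base, size) pair becomes a single RangeInclusive<u64> whose end is the last usable address (base + size - 1). A minimal sketch of the new call shape, with illustrative values and an assumed crate-root re-export of AddressAllocator/Result (not taken from this diff):

    use std::ops::RangeInclusive;

    use resources::{AddressAllocator, Result}; // assumed re-export path, for this sketch only

    fn build_pool() -> Result<AddressAllocator> {
        // Before this change: AddressAllocator::new(0x1000, 0x1000, Some(0x100), None)
        // After: the same pool expressed as the inclusive range 0x1000..=0x1FFF.
        let pool: RangeInclusive<u64> = 0x1000..=0x1FFF;
        AddressAllocator::new(pool, Some(0x100), None)
    }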

View file

@@ -413,8 +413,8 @@ impl arch::LinuxArch for AArch64 {
         // Note: This assumes that the ramoops region is the first thing allocated from the high
         // MMIO region.
         let high_mmio_alloc = system_allocator.mmio_allocator(MmioType::High);
-        let high_mmio_base = high_mmio_alloc.pool_base();
-        let high_mmio_size = high_mmio_alloc.pool_size();
+        let high_mmio_base = *high_mmio_alloc.pool().start();
+        let high_mmio_size = high_mmio_alloc.pool().end() - high_mmio_base + 1;
         let (pci_device_base, pci_device_size) = match &ramoops_region {
             Some(r) => {
                 if r.address != high_mmio_base {
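
For reference on the accessors used above: RangeInclusive::start() and end() return references (hence the dereference), and the byte count of an inclusive range is end - start + 1. A standalone illustration over a plain u64 range, independent of the crosvm types:

    fn main() {
        let pool = 0x8000_0000u64..=0x8FFF_FFFFu64;
        let high_mmio_base = *pool.start(); // start() returns &u64
        let high_mmio_size = pool.end() - high_mmio_base + 1;
        assert_eq!(high_mmio_size, 0x1000_0000);
    }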

View file

@@ -4,6 +4,7 @@
 
 use std::cmp;
 use std::collections::{BTreeSet, HashMap};
+use std::ops::RangeInclusive;
 
 use crate::{Alloc, Error, Result};
 
@@ -13,8 +14,7 @@ use crate::{Alloc, Error, Result};
 /// An human-readable tag String must also be provided for debugging / reference.
 #[derive(Debug, Eq, PartialEq)]
 pub struct AddressAllocator {
-    pool_base: u64,
-    pool_size: u64,
+    pool: RangeInclusive<u64>,
     min_align: u64,
     preferred_align: u64,
     /// The region that is allocated.
@@ -25,31 +25,22 @@ pub struct AddressAllocator {
 impl AddressAllocator {
     /// Creates a new `AddressAllocator` for managing a range of addresses.
-    /// Can return `None` if `pool_base` + `pool_size` overflows a u64 or if alignment isn't a power
-    /// of two.
+    /// Can return an error if `pool` is empty or if alignment isn't a power of two.
     ///
-    /// * `pool_base` - The starting address of the range to manage.
-    /// * `pool_size` - The size of the address range in bytes.
+    /// * `pool` - The address range to allocate from.
     /// * `min_align` - The minimum size of an address region to align to, defaults to four.
     /// * `preferred_align` - The preferred alignment of an address region, used if possible.
     ///
     /// If an allocation cannot be satisfied with the preferred alignment, the minimum alignment
     /// will be used instead.
     pub fn new(
-        pool_base: u64,
-        pool_size: u64,
+        pool: RangeInclusive<u64>,
         min_align: Option<u64>,
         preferred_align: Option<u64>,
     ) -> Result<Self> {
-        if pool_size == 0 {
+        if pool.is_empty() {
             return Err(Error::PoolSizeZero);
         }
-        let pool_end = pool_base
-            .checked_add(pool_size - 1)
-            .ok_or(Error::PoolOverflow {
-                base: pool_base,
-                size: pool_size,
-            })?;
 
         let min_align = min_align.unwrap_or(4);
         if !min_align.is_power_of_two() || min_align == 0 {
             return Err(Error::BadAlignment);
@@ -61,10 +52,9 @@ impl AddressAllocator {
         }
 
         let mut regions = BTreeSet::new();
-        regions.insert((pool_base, pool_end));
+        regions.insert((*pool.start(), *pool.end()));
         Ok(AddressAllocator {
-            pool_base,
-            pool_size,
+            pool,
             min_align,
             preferred_align,
             allocs: HashMap::new(),
@@ -72,18 +62,11 @@
         })
     }
 
-    /// Gets the starting address of the allocator.
+    /// Gets the pool managed by the allocator.
     ///
-    /// This returns the original `pool_base` value provided to `AddressAllocator::new()`.
-    pub fn pool_base(&self) -> u64 {
-        self.pool_base
-    }
-
-    /// Gets the size of the allocator's address range in bytes.
-    ///
-    /// This returns the original `pool_size` value provided to `AddressAllocator::new()`.
-    pub fn pool_size(&self) -> u64 {
-        self.pool_size
+    /// This returns the original `pool` value provided to `AddressAllocator::new()`.
+    pub fn pool(&self) -> RangeInclusive<u64> {
+        self.pool.clone()
     }
 
     fn internal_allocate_with_align(
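
Note that the old "pool_size == 0" rejection maps onto RangeInclusive::is_empty(): an inclusive range is empty exactly when start > end, which is also why the new_fails_size_zero test below passes RangeInclusive::new(0x1000, 0). A quick standalone check of that semantic (std only, not crosvm code):

    use std::ops::RangeInclusive;

    fn main() {
        assert!(RangeInclusive::new(0x1000u64, 0u64).is_empty()); // start > end: rejected as PoolSizeZero
        assert!(!(0x1000u64..=0x1000u64).is_empty()); // a single-address pool is still valid
    }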
@@ -434,7 +417,8 @@ mod tests {
     #[test]
     fn example() {
         // Anon is used for brevity. Don't manually instantiate Anon allocs!
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x110, Alloc::Anon(0), "caps".to_string()),
             Ok(0x1000)
@@ -453,29 +437,27 @@
         );
     }
 
-    #[test]
-    fn new_fails_overflow() {
-        assert!(AddressAllocator::new(u64::max_value(), 0x100, None, None).is_err());
-    }
-
     #[test]
     fn new_fails_size_zero() {
-        assert!(AddressAllocator::new(0x1000, 0, None, None).is_err());
+        assert!(AddressAllocator::new(RangeInclusive::new(0x1000, 0), None, None).is_err());
     }
 
     #[test]
     fn new_fails_alignment_zero() {
-        assert!(AddressAllocator::new(0x1000, 0x10000, Some(0), None).is_err());
+        assert!(AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0), None).is_err());
     }
 
     #[test]
     fn new_fails_alignment_non_power_of_two() {
-        assert!(AddressAllocator::new(0x1000, 0x10000, Some(200), None).is_err());
+        assert!(
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(200), None).is_err()
+        );
     }
 
     #[test]
     fn allocate_fails_exising_alloc() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
             Ok(0x1000)
@@ -488,7 +470,8 @@ mod tests {
     #[test]
     fn allocate_fails_not_enough_space() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
             Ok(0x1000)
@@ -505,7 +488,8 @@ mod tests {
     #[test]
     fn allocate_with_special_alignment() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x10, Alloc::Anon(0), String::from("bar0")),
             Ok(0x1000)
@@ -522,7 +506,8 @@ mod tests {
     #[test]
     fn allocate_and_split_allocate_at() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(1), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(1), None).unwrap();
         // 0x1200..0x1a00
         assert_eq!(
             pool.allocate_at(0x1200, 0x800, Alloc::Anon(0), String::from("bar0")),
@@ -571,7 +556,8 @@ mod tests {
     #[test]
     fn allocate_alignment() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
             Ok(0x1000)
@@ -584,7 +570,8 @@ mod tests {
     #[test]
     fn allocate_retrieve_alloc() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
             Ok(0x1000)
@@ -597,7 +584,8 @@ mod tests {
     #[test]
     fn allocate_with_alignment_allocator_alignment() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
         assert_eq!(
             pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x1),
             Ok(0x1000)
@@ -610,7 +598,8 @@ mod tests {
     #[test]
     fn allocate_with_alignment_custom_alignment() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x4), None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x4), None).unwrap();
         assert_eq!(
             pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
             Ok(0x1000)
@@ -623,7 +612,8 @@ mod tests {
     #[test]
     fn allocate_with_alignment_no_allocator_alignment() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
         assert_eq!(
             pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
             Ok(0x1000)
@@ -636,7 +626,8 @@ mod tests {
     #[test]
     fn allocate_with_alignment_alignment_non_power_of_two() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
         assert!(pool
             .allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 200)
             .is_err());
@@ -644,7 +635,8 @@ mod tests {
     #[test]
     fn allocate_with_release() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
         assert_eq!(
             pool.allocate_with_align(0x100, Alloc::Anon(0), String::from("bar0"), 0x100),
             Ok(0x1000)
@@ -658,7 +650,8 @@ mod tests {
     #[test]
     fn coalescing_and_overlap() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
         assert!(pool.insert_at(0x3000, 0x1000).is_ok());
         assert!(pool.insert_at(0x1fff, 0x20).is_err());
         assert!(pool.insert_at(0x2ff1, 0x10).is_err());
@@ -672,7 +665,8 @@ mod tests {
     #[test]
     fn coalescing_single_addresses() {
-        let mut pool = AddressAllocator::new(0x1000, 0x1000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
         assert!(pool.insert_at(0x2001, 1).is_ok());
         assert!(pool.insert_at(0x2003, 1).is_ok());
         assert!(pool.insert_at(0x2000, 1).is_ok());
@@ -685,7 +679,8 @@ mod tests {
     #[test]
     fn allocate_and_verify_pci_offset() {
-        let mut pool = AddressAllocator::new(0x1000, 0x10000, None, None).unwrap();
+        let mut pool =
+            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
         let pci_bar0 = Alloc::PciBar {
             bus: 1,
             dev: 2,

View file

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 use std::collections::BTreeMap;
+use std::ops::RangeInclusive;
 
 use base::pagesize;
@@ -60,6 +61,13 @@ pub struct SystemAllocator {
     next_anon_id: usize,
 }
 
+fn to_range_inclusive(base: u64, size: u64) -> Result<RangeInclusive<u64>> {
+    let end = base
+        .checked_add(size.checked_sub(1).ok_or(Error::PoolSizeZero)?)
+        .ok_or(Error::PoolOverflow { base, size })?;
+    Ok(RangeInclusive::new(base, end))
+}
+
 impl SystemAllocator {
     /// Creates a new `SystemAllocator` for managing addresses and irq numbers.
     /// Will return an error if `base` + `size` overflows u64 (or allowed
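
The to_range_inclusive helper folds the old zero-size and overflow checks into one place. A standalone sketch of the same checked conversion, using Option instead of the crate's Error type:

    use std::ops::RangeInclusive;

    fn base_size_to_range(base: u64, size: u64) -> Option<RangeInclusive<u64>> {
        // A zero size has no last address, and base + size - 1 must not wrap.
        let end = base.checked_add(size.checked_sub(1)?)?;
        Some(base..=end)
    }

    fn main() {
        assert_eq!(base_size_to_range(0x1000, 0x1000), Some(0x1000..=0x1FFF));
        assert_eq!(base_size_to_range(0x1000, 0), None);       // zero size
        assert_eq!(base_size_to_range(u64::MAX, 0x100), None); // end would overflow
    }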
@@ -75,22 +83,24 @@ impl SystemAllocator {
                 if io.base > 0x1_0000 || io.size + io.base > 0x1_0000 {
                     return Err(Error::IOPortOutOfRange(io.base, io.size));
                 }
-                Some(AddressAllocator::new(io.base, io.size, Some(0x400), None)?)
+                Some(AddressAllocator::new(
+                    to_range_inclusive(io.base, io.size)?,
+                    Some(0x400),
+                    None,
+                )?)
             } else {
                 None
             },
             mmio_address_spaces: [
                 // MmioType::Low
                 AddressAllocator::new(
-                    config.low_mmio.base,
-                    config.low_mmio.size,
+                    to_range_inclusive(config.low_mmio.base, config.low_mmio.size)?,
                     Some(page_size),
                     None,
                 )?,
                 // MmioType::High
                 AddressAllocator::new(
-                    config.high_mmio.base,
-                    config.high_mmio.size,
+                    to_range_inclusive(config.high_mmio.base, config.high_mmio.size)?,
                     Some(page_size),
                     None,
                 )?,
@@ -100,8 +110,7 @@ impl SystemAllocator {
             mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
                 Some(AddressAllocator::new(
-                    platform.base,
-                    platform.size,
+                    to_range_inclusive(platform.base, platform.size)?,
                     Some(page_size),
                     None,
                 )?)
@@ -110,8 +119,7 @@ impl SystemAllocator {
             },
             irq_allocator: AddressAllocator::new(
-                config.first_irq as u64,
-                1024 - config.first_irq as u64,
+                RangeInclusive::new(config.first_irq as u64, 1023),
                 Some(1),
                 None,
             )?,
@@ -150,7 +158,7 @@ impl SystemAllocator {
             // Each bus supports up to 32 (devices) x 8 (functions).
             // Prefer allocating at device granularity (preferred_align = 8), but fall back to
             // allocating individual functions (min_align = 1) when we run out of devices.
-            match AddressAllocator::new(base, (32 * 8) - base, Some(1), Some(8)) {
+            match AddressAllocator::new(RangeInclusive::new(base, (32 * 8) - 1), Some(1), Some(8)) {
                 Ok(v) => self.pci_allocator.insert(bus, v),
                 Err(_) => return None,
             };
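
Both the IRQ and PCI pools follow the same pattern: the old size argument is replaced by the last valid value (IRQ 1023; PCI device-function 32 * 8 - 1 = 255). A quick arithmetic check with plain integers, outside crosvm:

    fn main() {
        let first_irq: u64 = 5;
        // old: base = first_irq, size = 1024 - first_irq; new: first_irq..=1023
        assert_eq!(first_irq + (1024 - first_irq) - 1, 1023);
        let base: u64 = 8;
        // old: base = base, size = (32 * 8) - base; new: base..=((32 * 8) - 1)
        assert_eq!(base + ((32 * 8) - base) - 1, (32 * 8) - 1);
    }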