Mirror of https://chromium.googlesource.com/crosvm/crosvm (synced 2024-11-24 04:09:48 +00:00)
Replace ::max_value() with ::MAX
The max_value() function is considered to be a "legacy numeric constant" now, and future clippy versions will warn about it:
<https://rust-lang.github.io/rust-clippy/master/index.html#legacy_numeric_constants>

BUG=None
TEST=tools/clippy # with rust-toolchain = "1.79"

Change-Id: I72a333dc4aa1f48cf71744c848f050097a7e7f55
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5690374
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Dennis Kempin <denniskempin@google.com>
This commit is contained in:
parent da353a0dbf
commit dc310d7cb6
33 changed files with 57 additions and 65 deletions
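To illustrate the pattern applied throughout this diff, here is a minimal, hypothetical sketch (not part of the commit): the legacy max_value() call next to the associated constant that replaces it, which clippy's legacy_numeric_constants lint prefers. The helper name largest_u16 is made up for illustration.

// Hypothetical example; largest_u16 is a made-up helper, not from crosvm.
fn largest_u16() -> u16 {
    // Old style, flagged by newer clippy as a "legacy numeric constant":
    // u16::max_value()
    // New style, using the associated constant stabilized in Rust 1.43:
    u16::MAX
}

fn main() {
    // Both forms still compile and evaluate to the same value (65535).
    assert_eq!(largest_u16(), u16::max_value());
}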
@@ -190,7 +190,7 @@ pub type Word = u16;
 
 impl Aml for Word {
     fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
-        if *self <= Byte::max_value().into() {
+        if *self <= Byte::MAX.into() {
             (*self as Byte).to_aml_bytes(bytes);
         } else {
             bytes.push(WORDPREFIX);
@@ -203,7 +203,7 @@ pub type DWord = u32;
 
 impl Aml for DWord {
     fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
-        if *self <= Word::max_value().into() {
+        if *self <= Word::MAX.into() {
             (*self as Word).to_aml_bytes(bytes);
         } else {
             bytes.push(DWORDPREFIX);
@@ -216,7 +216,7 @@ pub type QWord = u64;
 
 impl Aml for QWord {
     fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
-        if *self <= DWord::max_value().into() {
+        if *self <= DWord::MAX.into() {
            (*self as DWord).to_aml_bytes(bytes);
         } else {
             bytes.push(QWORDPREFIX);
@@ -1268,7 +1268,7 @@ where
 {
     let size = image.get_len().map_err(LoadImageError::GetLen)?;
 
-    if size > usize::max_value() as u64 || size > max_size {
+    if size > usize::MAX as u64 || size > max_size {
         return Err(LoadImageError::ImageSizeTooLarge(size));
     }
 
@@ -1318,7 +1318,7 @@ where
         return Err(LoadImageError::ZeroSizedImage);
     }
 
-    if size > usize::max_value() as u64 || size > max_size {
+    if size > usize::MAX as u64 || size > max_size {
         return Err(LoadImageError::ImageSizeTooLarge(size));
     }
 
@@ -334,7 +334,7 @@ impl MemoryMapping {
         // and set the (ANONYMOUS | NORESERVE) flag.
         let (fd, offset) = match fd {
             Some((fd, offset)) => {
                 if offset > libc::off64_t::max_value() as u64 {
-                if offset > libc::off64_t::MAX as u64 {
+                    return Err(Error::InvalidOffset);
                 }
                 // Map private for read-only seal. See below for upstream relax of the restriction.
@@ -1032,11 +1032,11 @@ mod tests {
     #[test]
     fn slice_overflow_error() {
         let m = MemoryMappingBuilder::new(5).build().unwrap();
-        let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
+        let res = m.get_slice(usize::MAX, 3).unwrap_err();
         assert_eq!(
             res,
             VolatileMemoryError::Overflow {
-                base: std::usize::MAX,
+                base: usize::MAX,
                 offset: 3,
             }
         );
@@ -1051,8 +1051,8 @@ mod tests {
     #[test]
     fn from_fd_offset_invalid() {
         let fd = tempfile().unwrap();
-        let res = MemoryMapping::from_fd_offset(&fd, 4096, (libc::off64_t::max_value() as u64) + 1)
-            .unwrap_err();
+        let res =
+            MemoryMapping::from_fd_offset(&fd, 4096, (libc::off64_t::MAX as u64) + 1).unwrap_err();
         match res {
             Error::InvalidOffset => {}
             e => panic!("unexpected error: {}", e),
@@ -210,13 +210,13 @@ pub fn fallocate<F: AsRawDescriptor>(
     offset: u64,
     len: u64,
 ) -> Result<()> {
-    let offset = if offset > libc::off64_t::max_value() as u64 {
+    let offset = if offset > libc::off64_t::MAX as u64 {
         return Err(Error::new(libc::EINVAL));
     } else {
         offset as libc::off64_t
     };
 
-    let len = if len > libc::off64_t::max_value() as u64 {
+    let len = if len > libc::off64_t::MAX as u64 {
         return Err(Error::new(libc::EINVAL));
     } else {
         len as libc::off64_t
@@ -500,7 +500,7 @@ pub fn poll_in<F: AsRawDescriptor>(fd: &F) -> bool {
 
 /// Return the maximum Duration that can be used with libc::timespec.
 pub fn max_timeout() -> Duration {
-    Duration::new(libc::time_t::max_value() as u64, 999999999)
+    Duration::new(libc::time_t::MAX as u64, 999999999)
 }
 
 /// If the given path is of the form /proc/self/fd/N for some N, returns `Ok(Some(N))`. Otherwise
@@ -217,7 +217,7 @@ impl<T: EventToken> EventContext<T> {
             // into `epoll_event` structures after the call.
             unsafe { MaybeUninit::uninit().assume_init() };
 
-        let timeout_millis = if timeout.as_secs() as i64 == i64::max_value() {
+        let timeout_millis = if timeout.as_secs() as i64 == i64::MAX {
             // We make the convenient assumption that 2^63 seconds is an effectively unbounded time
             // frame. This is meant to mesh with `wait` calling us with no timeout.
             -1
@@ -228,8 +228,8 @@ impl<T: EventToken> EventContext<T> {
                 .as_secs()
                 .checked_mul(1_000)
                 .and_then(|ms| ms.checked_add(u64::from(timeout.subsec_nanos()) / 1_000_000))
-                .unwrap_or(i32::max_value() as u64);
-            min(i32::max_value() as u64, millis) as i32
+                .unwrap_or(i32::MAX as u64);
+            min(i32::MAX as u64, millis) as i32
         };
         let ret = {
             let max_events = epoll_events.len() as c_int;
@@ -37,7 +37,7 @@ const AF_VSOCK: sa_family_t = 40;
 const VMADDR_CID_LOCAL: c_uint = 1;
 
 /// Vsock equivalent of binding on port 0. Binds to a random port.
-pub const VMADDR_PORT_ANY: c_uint = c_uint::max_value();
+pub const VMADDR_PORT_ANY: c_uint = c_uint::MAX;
 
 // The number of bytes of padding to be added to the sockaddr_vm struct. Taken directly
 // from linux/vm_sockets.h.
@@ -297,11 +297,11 @@ mod tests {
     fn slice_overflow_error() {
         let shm = SharedMemory::new("test", 1028).unwrap();
         let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
-        let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
+        let res = m.get_slice(usize::MAX, 3).unwrap_err();
         assert_eq!(
             res,
             VolatileMemoryError::Overflow {
-                base: std::usize::MAX,
+                base: usize::MAX,
                 offset: 3,
             }
         );
@@ -165,8 +165,7 @@ impl MemoryMapping {
 
         // on windows, pages needed to be of fixed granular size, and the
         // maximum valid value is an i64.
-        if file_handle.1 % allocation_granularity() != 0 || file_handle.1 > i64::max_value() as u64
-        {
+        if file_handle.1 % allocation_granularity() != 0 || file_handle.1 > i64::MAX as u64 {
             return Err(Error::InvalidOffset);
         }
 
@@ -266,7 +265,7 @@ mod tests {
         let shm = SharedMemory::new("test", 1028).unwrap();
         let res = MemoryMappingBuilder::new(4096)
             .from_shared_memory(&shm)
-            .offset((i64::max_value() as u64) + 1)
+            .offset((i64::MAX as u64) + 1)
             .build()
             .unwrap_err();
         match res {
@@ -27,7 +27,6 @@ use std::ptr::write_bytes;
 use std::ptr::write_volatile;
 use std::result;
 use std::slice;
-use std::usize;
 
 use remain::sorted;
 use thiserror::Error;
@@ -474,13 +473,12 @@ mod tests {
 
     #[test]
     fn slice_overflow_error() {
-        use std::usize::MAX;
         let a = VecMem::new(1);
-        let res = a.get_slice(MAX, 1).unwrap_err();
+        let res = a.get_slice(usize::MAX, 1).unwrap_err();
         assert_eq!(
             res,
             Error::Overflow {
-                base: MAX,
+                base: usize::MAX,
                 offset: 1,
             }
         );
@@ -325,7 +325,7 @@ pub fn max<T: BitFieldSpecifier>() -> u64 {
     if T::FIELD_WIDTH < 64 {
         (1 << T::FIELD_WIDTH) - 1
     } else {
-        u64::max_value()
+        u64::MAX
     }
 }
 
@@ -105,7 +105,7 @@ impl MsiConfig {
 
     pub fn read_msi_capability(&self, offset: u32, data: u32) -> u32 {
         if offset == 0 {
-            (self.ctrl as u32) << 16 | (data & u16::max_value() as u32)
+            (self.ctrl as u32) << 16 | (data & u16::MAX as u32)
         } else {
             data
         }
@@ -194,7 +194,7 @@ impl MsixConfig {
         if self.masked {
             msg_ctl |= FUNCTION_MASK_BIT;
         }
-        (msg_ctl as u32) << 16 | (data & u16::max_value() as u32)
+        (msg_ctl as u32) << 16 | (data & u16::MAX as u32)
     }
 
     /// Write to the MSI-X Capability Structure.
@@ -704,7 +704,7 @@ impl PciConfiguration {
             .ok_or(Error::BarAddressInvalid(config.addr, config.size))?;
         match config.region_type {
             PciBarRegionType::Memory32BitRegion | PciBarRegionType::IoRegion => {
-                if end_addr > u64::from(u32::max_value()) {
+                if end_addr > u64::from(u32::MAX) {
                     return Err(Error::BarAddressInvalid(config.addr, config.size));
                 }
             }
@@ -822,7 +822,7 @@ impl BusDevice for PciConfigMmio {
         // Only allow reads to the register boundary.
         let start = info.offset as usize % 4;
         let end = start + data.len();
-        if end > 4 || info.offset > u32::max_value() as u64 {
+        if end > 4 || info.offset > u32::MAX as u64 {
             for d in data {
                 *d = 0xff;
             }
@@ -836,7 +836,7 @@ impl BusDevice for PciConfigMmio {
     }
 
     fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
-        if info.offset > u32::max_value() as u64 {
+        if info.offset > u32::MAX as u64 {
             return;
         }
         self.config_space_write(info.offset as u32, info.offset % 4, data)
@@ -11,7 +11,6 @@ use std::path::Path;
 use std::path::PathBuf;
 use std::str::FromStr;
 use std::sync::Arc;
-use std::u32;
 
 use acpi_tables::aml::Aml;
 use base::debug;
@@ -822,7 +821,7 @@ impl VfioPciDevice {
             base_class_code == PciClassCode::DisplayController && vendor_id == PCI_VENDOR_ID_INTEL;
         let device_data = if is_intel_gfx {
             Some(DeviceData::IntelGfxData {
-                opregion_index: u32::max_value(),
+                opregion_index: u32::MAX,
             })
         } else {
             None
@@ -4,7 +4,6 @@
 
 use std::fs::File;
 use std::sync::Arc;
-use std::u32;
 
 use anyhow::bail;
 use anyhow::Context;
@@ -16,7 +16,6 @@ use std::path::PathBuf;
 use std::ptr::addr_of_mut;
 use std::slice;
 use std::sync::Arc;
-use std::u32;
 
 use base::error;
 use base::ioctl;
@@ -16,7 +16,6 @@ use std::sync::atomic::AtomicU64;
 use std::sync::atomic::Ordering;
 use std::sync::Arc;
 use std::time::Duration;
-use std::u32;
 
 use anyhow::Context;
 use base::debug;
@@ -21,7 +21,7 @@ use crate::virtio::block::DiskOption;
 use crate::virtio::BlockAsync;
 
 pub fn get_seg_max(queue_size: u16) -> u32 {
-    let seg_max = min(max(iov_max(), 1), u32::max_value() as usize) as u32;
+    let seg_max = min(max(iov_max(), 1), u32::MAX as usize) as u32;
 
     // Since we do not currently support indirect descriptors, the maximum
     // number of segments must be smaller than the queue size.
@@ -1403,7 +1403,7 @@ mod tests {
         } = chain.reader;
 
         let drain = regions
-            .get_remaining_regions_with_count(::std::usize::MAX)
+            .get_remaining_regions_with_count(usize::MAX)
             .fold(0usize, |total, region| total + region.len);
         assert_eq!(drain, 128);
 
@@ -1456,7 +1456,7 @@ mod tests {
         } = chain.reader;
 
         let drain = regions
-            .get_remaining_with_count(&memory, ::std::usize::MAX)
+            .get_remaining_with_count(&memory, usize::MAX)
             .iter()
             .fold(0usize, |total, iov| total + iov.size());
         assert_eq!(drain, 128);
@@ -2551,13 +2551,13 @@ impl FileSystem for PassthroughFs {
             attr.st_uid
         } else {
             // Cannot use -1 here because these are unsigned values.
-            ::std::u32::MAX
+            u32::MAX
         };
         let gid = if valid.contains(SetattrValid::GID) {
             attr.st_gid
         } else {
             // Cannot use -1 here because these are unsigned values.
-            ::std::u32::MAX
+            u32::MAX
         };
 
         // SAFETY: this is a constant value that is a nul-terminated string without interior
@@ -873,7 +873,7 @@ impl VirtioGpu {
             index
         );
         Ok(OkCapsetInfo {
-            capset_id: u32::max_value(),
+            capset_id: u32::MAX,
             version: 0,
             size: 0,
         })
@@ -60,7 +60,7 @@ pub enum P9Error {
     #[error("failed to signal used queue: {0}")]
     SignalUsedQueue(SysError),
     /// The tag for the 9P device was too large to fit in the config space.
-    #[error("P9 device tag is too long: len = {0}, max = {}", ::std::u16::MAX)]
+    #[error("P9 device tag is too long: len = {0}, max = {}", u16::MAX)]
     TagTooLong(usize),
     /// Error while polling for events.
     #[error("failed to wait for events: {0}")]
@@ -142,7 +142,7 @@ pub struct P9 {
 
 impl P9 {
     pub fn new(base_features: u64, tag: &str, p9_cfg: p9::Config) -> P9Result<P9> {
-        if tag.len() > ::std::u16::MAX as usize {
+        if tag.len() > u16::MAX as usize {
             return Err(P9Error::TagTooLong(tag.len()));
         }
 
@@ -379,7 +379,7 @@ impl Pmem {
         swap_interval: Option<Duration>,
         mapping_writable: bool,
     ) -> SysResult<Pmem> {
-        if mapping_size > usize::max_value() as u64 {
+        if mapping_size > usize::MAX as u64 {
             return Err(SysError::new(libc::EOVERFLOW));
         }
 
@@ -652,10 +652,10 @@ mod tests {
 
         // Assume driver submit another u16::MAX - 0x100 req to device,
        // Device has handled all of them, so increase self.next_used to u16::MAX
-        for _ in device_generate.0..u16::max_value() {
+        for _ in device_generate.0..u16::MAX {
             queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
         }
-        device_generate = Wrapping(u16::max_value());
+        device_generate = Wrapping(u16::MAX);
 
         // At this moment driver just handled 0x100 interrupts, so it
         // should inject interrupt.
@@ -737,10 +737,10 @@ mod tests {
 
         // Assume driver submit another u16::MAX - 0x101 req to device,
         // Device has handled all of them, so increase self.next_used to u16::MAX
-        for _ in device_generate.0..u16::max_value() {
+        for _ in device_generate.0..u16::MAX {
             queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
         }
-        device_generate = Wrapping(u16::max_value());
+        device_generate = Wrapping(u16::MAX);
 
         // At this moment driver hasn't finished last interrupt yet,
         // so interrupt isn't needed.
@@ -317,7 +317,7 @@ impl DecoderBackend for LibvdaDecoder {
 
         // Raise the first |# of supported raw formats|-th bits because we can assume that any
         // combination of (a coded format, a raw format) is valid in Chrome.
-        let mask = !(u64::max_value() << caps.output_formats.len());
+        let mask = !(u64::MAX << caps.output_formats.len());
 
         let mut in_fmts = vec![];
         let mut profiles: BTreeMap<Format, Vec<Profile>> = Default::default();
@@ -389,7 +389,7 @@ impl DecoderBackend for LibvdaDecoder {
 
         // Raise the first |# of supported coded formats|-th bits because we can assume that any
         // combination of (a coded format, a raw format) is valid in Chrome.
-        let mask = !(u64::max_value() << caps.input_formats.len());
+        let mask = !(u64::MAX << caps.input_formats.len());
         let out_fmts = caps
             .output_formats
             .iter()
@@ -1053,14 +1053,14 @@ impl WlVfd {
                 .send_vectored_with_fds(&data.get_remaining(), rds)
                 .map_err(WlError::SendVfd)?;
             // All remaining data in `data` is now considered consumed.
-            data.consume(::std::usize::MAX);
+            data.consume(usize::MAX);
             Ok(WlResp::Ok)
         } else if let Some((_, local_pipe)) = &mut self.local_pipe {
             // Impossible to send descriptors over a simple pipe.
             if !rds.is_empty() {
                 return Ok(WlResp::InvalidType);
             }
-            data.read_to(local_pipe, usize::max_value())
+            data.read_to(local_pipe, usize::MAX)
                 .map_err(WlError::WritePipe)?;
             Ok(WlResp::Ok)
         } else {
@@ -1619,7 +1619,7 @@ impl PunchHole for QcowFile {
         let mut remaining = length;
         let mut offset = offset;
         while remaining > 0 {
-            let chunk_length = min(remaining, std::usize::MAX as u64) as usize;
+            let chunk_length = min(remaining, usize::MAX as u64) as usize;
             inner.zero_bytes(offset, chunk_length)?;
             remaining -= chunk_length as u64;
             offset += chunk_length as u64;
@@ -48,7 +48,7 @@ impl QcowRawFile {
     ) -> io::Result<Vec<u64>> {
         let mut table = vec![0; count as usize];
         self.file.seek(SeekFrom::Start(offset))?;
-        let mask = mask.unwrap_or(u64::max_value());
+        let mask = mask.unwrap_or(u64::MAX);
         for ptr in &mut table {
             let mut value = [0u8; 8];
             self.file.read_exact(&mut value)?;
@@ -231,7 +231,7 @@ pub trait ZeroCopyReader {
     fn copy_to_end(&mut self, f: &mut File, mut off: u64) -> io::Result<usize> {
         let mut out = 0;
         loop {
-            match self.read_to(f, ::std::usize::MAX, off) {
+            match self.read_to(f, usize::MAX, off) {
                 Ok(0) => return Ok(out),
                 Ok(n) => {
                     off = off.saturating_add(n as u64);
@@ -324,7 +324,7 @@ pub trait ZeroCopyWriter {
     fn copy_to_end(&mut self, f: &mut File, mut off: u64) -> io::Result<usize> {
         let mut out = 0;
         loop {
-            match self.write_from(f, ::std::usize::MAX, off) {
+            match self.write_from(f, usize::MAX, off) {
                 Ok(0) => return Ok(out),
                 Ok(n) => {
                     off = off.saturating_add(n as u64);
@@ -1038,8 +1038,8 @@ impl<F: FileSystem + Sync> Server<F> {
             minor: KERNEL_MINOR_VERSION,
             max_readahead,
             flags: enabled.bits() as u32,
-            max_background: ::std::u16::MAX,
-            congestion_threshold: (::std::u16::MAX / 4) * 3,
+            max_background: u16::MAX,
+            congestion_threshold: (u16::MAX / 4) * 3,
             max_write,
             time_gran: 1, // nanoseconds
             max_pages,
@@ -1882,7 +1882,7 @@ fn add_dirent<W: Writer>(
 ) -> io::Result<usize> {
     // Strip the trailing '\0'.
     let name = d.name.to_bytes();
-    if name.len() > ::std::u32::MAX as usize {
+    if name.len() > u32::MAX as usize {
         return Err(io::Error::from_raw_os_error(libc::EOVERFLOW));
     }
 
@@ -303,9 +303,8 @@ impl arch::LinuxArch for Riscv64 {
                 return Err(Error::ImageTypeUnsupported);
             }
             VmImage::Kernel(ref mut kernel_image) => {
-                let kernel_size =
-                    arch::load_image(&mem, kernel_image, get_kernel_addr(), u64::max_value())
-                        .map_err(Error::KernelLoadFailure)?;
+                let kernel_size = arch::load_image(&mem, kernel_image, get_kernel_addr(), u64::MAX)
+                    .map_err(Error::KernelLoadFailure)?;
                 let kernel_end = get_kernel_addr().offset() + kernel_size as u64;
                 initrd = match components.initrd_image {
                     Some(initrd_file) => {
@@ -81,7 +81,7 @@ macro_rules! fail_if_zero {
 
 /// Returns the lower 32 bits of a u64 as a u32 (c_ulong/DWORD)
 pub fn get_low_order(number: u64) -> c_ulong {
-    (number & (u32::max_value() as u64)) as c_ulong
+    (number & (u32::MAX as u64)) as c_ulong
 }
 
 /// Returns the upper 32 bits of a u64 as a u32 (c_ulong/DWORD)