From 2768f223eeaf444b797fb2c1fd6405fb6171c66a Mon Sep 17 00:00:00 2001 From: Vikram Auradkar Date: Tue, 12 Dec 2023 20:59:50 +0000 Subject: [PATCH] clippy: enforce safety block comments BUG=b:316174930 TEST=none Change-Id: I5c7811b2c548155aa003e4b71a54bbc16e2f2588 Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5120567 Commit-Queue: Vikram Auradkar Reviewed-by: Dennis Kempin --- .cargo/config.toml | 1 + audio_util/src/file_streams.rs | 1 + base/src/alloc.rs | 23 +- base/src/descriptor.rs | 2 + base/src/descriptor_reflection.rs | 10 +- base/src/iobuf.rs | 6 + base/src/mmap.rs | 13 + base/src/sys/linux/acpi_event.rs | 2 + base/src/sys/linux/capabilities.rs | 5 +- base/src/sys/linux/descriptor.rs | 2 + base/src/sys/linux/event.rs | 13 +- base/src/sys/linux/file.rs | 1 + base/src/sys/linux/file_flags.rs | 1 + base/src/sys/linux/get_filesystem_type.rs | 2 + base/src/sys/linux/linux/syslog.rs | 3 + base/src/sys/linux/mmap.rs | 28 ++- base/src/sys/linux/mod.rs | 24 +- base/src/sys/linux/net.rs | 11 +- base/src/sys/linux/netlink.rs | 7 + base/src/sys/linux/poll.rs | 17 +- base/src/sys/linux/priority.rs | 5 +- base/src/sys/linux/process.rs | 3 + base/src/sys/linux/sched.rs | 7 + base/src/sys/linux/shm.rs | 10 + base/src/sys/linux/signal.rs | 15 ++ base/src/sys/linux/signalfd.rs | 7 + base/src/sys/linux/terminal.rs | 10 + base/src/sys/linux/timer.rs | 9 +- base/src/sys/linux/vsock.rs | 15 +- base/src/sys/unix/descriptor.rs | 10 + base/src/sys/unix/fcntl.rs | 17 +- base/src/sys/unix/file_traits.rs | 8 + base/src/sys/unix/handle_eintr.rs | 1 + base/src/sys/unix/net.rs | 46 +++- base/src/sys/unix/sock_ctrl_msg.rs | 20 ++ base/src/sys/unix/stream_channel.rs | 1 + base/src/sys/unix/system_info.rs | 3 + base/src/sys/windows/console.rs | 1 + base/src/sys/windows/descriptor.rs | 13 + base/src/sys/windows/event.rs | 32 ++- base/src/sys/windows/file_traits.rs | 16 +- base/src/sys/windows/file_util.rs | 7 +- base/src/sys/windows/foreground_window.rs | 1 + 
base/src/sys/windows/ioctl.rs | 7 + base/src/sys/windows/mmap.rs | 10 + base/src/sys/windows/mmap_platform.rs | 6 + base/src/sys/windows/multi_process_mutex.rs | 5 +- base/src/sys/windows/named_pipes.rs | 54 ++-- base/src/sys/windows/platform_timer_utils.rs | 8 +- base/src/sys/windows/priority.rs | 13 +- base/src/sys/windows/punch_hole.rs | 1 + base/src/sys/windows/sched.rs | 2 + base/src/sys/windows/shm.rs | 6 +- base/src/sys/windows/stream_channel.rs | 9 +- base/src/sys/windows/syslog.rs | 2 + base/src/sys/windows/system_info.rs | 2 + base/src/sys/windows/terminal.rs | 4 + base/src/sys/windows/timer.rs | 7 +- base/src/sys/windows/wait.rs | 22 +- base/src/volatile_memory.rs | 25 +- base/tests/linux/main.rs | 5 + base/tests/linux/net.rs | 2 + base/tests/linux/tube.rs | 6 +- base/tests/process.rs | 2 + base/tests/tube.rs | 6 +- broker_ipc/src/lib.rs | 3 +- common/data_model/src/endian.rs | 3 + common/data_model/src/flexible_array.rs | 2 + cros_async/src/blocking/sys/linux/block_on.rs | 2 + cros_async/src/mem.rs | 2 + cros_async/src/sync/cv.rs | 15 ++ cros_async/src/sync/mu.rs | 33 ++- cros_async/src/sync/spin.rs | 7 + cros_async/src/sync/waiter.rs | 6 + cros_async/src/sys/linux/poll_source.rs | 30 ++- cros_async/src/sys/linux/uring_executor.rs | 29 ++- cros_async/src/sys/windows/event.rs | 13 +- cros_async/src/sys/windows/handle_source.rs | 2 + .../src/sys/windows/io_completion_port.rs | 30 ++- .../src/sys/windows/overlapped_source.rs | 9 + cros_async/src/sys/windows/wait_for_handle.rs | 15 +- cros_async/src/waker.rs | 2 + cros_tracing_types/src/static_strings.rs | 1 + crosvm_control/src/lib.rs | 4 + crosvm_plugin/src/lib.rs | 7 + devices/src/irqchip/kvm/aarch64.rs | 4 + devices/src/pci/coiommu.rs | 36 ++- devices/src/platform/vfio_platform.rs | 1 + devices/src/proxy.rs | 2 + devices/src/serial/sys/windows.rs | 2 + devices/src/tsc.rs | 1 + devices/src/tsc/calibrate.rs | 3 + devices/src/tsc/cpuid.rs | 3 + devices/src/usb/xhci/xhci_abi.rs | 20 ++ devices/src/vfio.rs | 
79 ++++-- devices/src/virtio/descriptor_utils.rs | 1 + devices/src/virtio/fs/caps.rs | 4 + devices/src/virtio/fs/passthrough.rs | 119 +++++---- devices/src/virtio/fs/read_dir.rs | 3 + devices/src/virtio/fs/worker.rs | 23 +- devices/src/virtio/gpu/protocol.rs | 1 + devices/src/virtio/gpu/virtio_gpu.rs | 14 +- devices/src/virtio/input/evdev.rs | 45 ++-- devices/src/virtio/iommu.rs | 1 + .../virtio/iommu/sys/linux/vfio_wrapper.rs | 1 + devices/src/virtio/pvclock.rs | 10 +- .../virtio/snd/vios_backend/shm_streams.rs | 16 +- .../src/virtio/snd/vios_backend/shm_vios.rs | 3 +- .../virtio/vhost/user/device/fs/sys/linux.rs | 7 + .../virtio/vhost/user/device/gpu/sys/linux.rs | 7 +- .../vhost/user/device/gpu/sys/windows.rs | 1 + .../src/virtio/vhost/user/device/handler.rs | 3 +- .../vhost/user/device/handler/sys/windows.rs | 21 +- .../virtio/vhost/user/device/net/sys/linux.rs | 1 + devices/src/virtio/vhost/user/device/wl.rs | 1 + .../src/virtio/video/decoder/backend/mod.rs | 2 + .../src/virtio/video/decoder/backend/vaapi.rs | 3 + devices/src/virtio/video/decoder/mod.rs | 2 + .../virtio/video/encoder/backend/ffmpeg.rs | 1 + devices/src/virtio/video/encoder/mod.rs | 4 + devices/src/virtio/video/protocol.rs | 1 + devices/src/virtio/video/resource.rs | 12 +- devices/src/virtio/video/worker.rs | 6 +- devices/src/virtio/vsock/sys/windows/vsock.rs | 2 + devices/src/virtio/wl.rs | 6 + devices/src/vmwdt.rs | 1 + e2e_tests/fixture/src/sys/linux.rs | 1 + e2e_tests/tests/pci_hotplug.rs | 1 + fuse/src/mount.rs | 1 + .../gpu_display_win/keyboard_input_manager.rs | 15 +- gpu_display/src/gpu_display_win/window.rs | 36 +++ .../window_message_dispatcher.rs | 7 + .../window_message_processor.rs | 1 + .../window_procedure_thread.rs | 11 +- gpu_display/src/gpu_display_wl.rs | 22 ++ gpu_display/src/gpu_display_x.rs | 131 ++++++++-- .../geniezone_sys/aarch64/bindings.rs | 1 + hypervisor/src/geniezone/mod.rs | 49 +++- hypervisor/src/gunyah/gunyah_sys/bindings.rs | 1 + hypervisor/src/gunyah/mod.rs | 
35 ++- hypervisor/src/haxm.rs | 5 + hypervisor/src/haxm/haxm_sys/bindings.rs | 1 + hypervisor/src/haxm/vcpu.rs | 51 +++- hypervisor/src/haxm/vm.rs | 14 +- hypervisor/src/haxm/win.rs | 6 + hypervisor/src/kvm/aarch64.rs | 19 +- hypervisor/src/kvm/mod.rs | 93 ++++++- hypervisor/src/kvm/x86_64.rs | 203 ++++++++++----- hypervisor/src/x86_64.rs | 5 + hypervisor/tests/kvm/main.rs | 11 +- hypervisor/tests/kvm/x86_64.rs | 9 +- hypervisor/tests/tsc_offsets.rs | 4 + io_uring/src/bindings.rs | 1 + io_uring/src/uring.rs | 31 ++- io_uring/tests/uring.rs | 38 ++- kvm/src/lib.rs | 235 +++++++++++++----- kvm/tests/dirty_log.rs | 1 + kvm/tests/kvm_tests.rs | 3 + kvm/tests/read_only_memory.rs | 1 + kvm/tests/real_run_adder.rs | 1 + kvm_sys/src/aarch64/bindings.rs | 1 + kvm_sys/src/x86/bindings.rs | 1 + kvm_sys/tests/basic.rs | 6 + media/ffmpeg/src/avcodec.rs | 62 ++++- media/ffmpeg/src/avutil.rs | 3 + media/ffmpeg/src/error.rs | 2 + media/ffmpeg/src/lib.rs | 1 + media/ffmpeg/src/swscale.rs | 3 + metrics/src/sys/windows/system_metrics.rs | 7 + net_sys/src/if_tun.rs | 2 + net_sys/src/iff.rs | 1 + net_util/src/slirp/sys/windows.rs | 1 + net_util/src/slirp/sys/windows/handler.rs | 14 +- net_util/src/sys/linux/tap.rs | 45 +++- protos/src/plugin.rs | 2 +- rutabaga_gfx/src/cross_domain/mod.rs | 4 +- .../src/cross_domain/sys/epoll_internal.rs | 9 + rutabaga_gfx/src/cross_domain/sys/linux.rs | 3 + .../src/generated/virgl_renderer_bindings.rs | 4 + rutabaga_gfx/src/gfxstream.rs | 42 +++- rutabaga_gfx/src/rutabaga_2d.rs | 1 + rutabaga_gfx/src/rutabaga_gralloc/minigbm.rs | 26 ++ .../src/rutabaga_gralloc/rendernode.rs | 3 + rutabaga_gfx/src/rutabaga_os/descriptor.rs | 5 + rutabaga_gfx/src/rutabaga_os/shm.rs | 1 + .../src/rutabaga_os/sys/linux/descriptor.rs | 8 + .../rutabaga_os/sys/linux/memory_mapping.rs | 3 + .../src/rutabaga_os/sys/windows/descriptor.rs | 13 +- rutabaga_gfx/src/rutabaga_utils.rs | 8 + rutabaga_gfx/src/virgl_renderer.rs | 65 ++++- src/crosvm/plugin/mod.rs | 8 +- 
src/crosvm/plugin/process.rs | 2 + src/crosvm/plugin/vcpu.rs | 9 +- src/crosvm/sys/linux.rs | 8 + src/crosvm/sys/linux/config.rs | 2 + src/crosvm/sys/linux/device_helpers.rs | 1 + src/crosvm/sys/linux/vcpu.rs | 1 + src/crosvm/sys/windows/broker.rs | 2 + src/sys/linux/panic_hook.rs | 3 +- src/sys/windows.rs | 5 +- src/sys/windows/main.rs | 7 +- swap/src/controller.rs | 10 +- swap/src/file.rs | 6 + swap/src/page_handler.rs | 4 + swap/src/processes.rs | 2 + swap/src/staging.rs | 16 ++ swap/src/uffd_list.rs | 8 +- swap/src/userfaultfd.rs | 30 ++- swap/tests/main.rs | 14 ++ swap/tests/page_handler.rs | 104 +++++++- tests/plugins.rs | 2 + third_party/libslirp-rs/src/context.rs | 55 +++- third_party/vmm_vhost/src/connection/tube.rs | 7 +- third_party/vmm_vhost/src/lib.rs | 3 +- third_party/vmm_vhost/src/master.rs | 1 + .../vmm_vhost/src/master_req_handler.rs | 1 + .../vmm_vhost/src/master_req_handler/unix.rs | 11 +- .../src/master_req_handler/windows.rs | 6 +- tools/impl/bindgen-common.sh | 1 + usb_sys/src/lib.rs | 5 + usb_util/src/device.rs | 23 +- vfio_sys/src/plat.rs | 1 + vfio_sys/src/vfio.rs | 14 ++ vhost/src/lib.rs | 14 ++ vhost/src/net.rs | 1 + vhost/src/vsock.rs | 2 + virtio_sys/src/vhost.rs | 13 + virtio_sys/src/virtio_fs.rs | 1 + virtio_sys/src/virtio_net.rs | 11 + virtio_sys/src/virtio_ring.rs | 5 + virtio_sys/src/virtio_scsi.rs | 1 + vm_control/src/lib.rs | 5 + vm_control/src/sys/windows/gpu.rs | 1 + vm_memory/src/guest_memory.rs | 19 +- vm_memory/src/udmabuf/sys/linux.rs | 2 + x86_64/src/acpi.rs | 3 + x86_64/src/bootparam.rs | 6 + x86_64/src/cpuid.rs | 20 +- x86_64/src/lib.rs | 3 + x86_64/src/mpspec.rs | 55 ++++ x86_64/src/smbios.rs | 1 + 241 files changed, 2668 insertions(+), 578 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 6094ed351d..a97cddfc9f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -25,4 +25,5 @@ rustflags = [ "-Aclippy::unreadable_literal", "-Aclippy::useless_let_if_seq", "-Aclippy::useless_transmute", + 
"-Dclippy::undocumented_unsafe_blocks", ] diff --git a/audio_util/src/file_streams.rs b/audio_util/src/file_streams.rs index 1602960104..903361eb37 100644 --- a/audio_util/src/file_streams.rs +++ b/audio_util/src/file_streams.rs @@ -258,6 +258,7 @@ impl AudioMemoryMapping { warn!("Accessing unallocated region"); return &mut self.zero_buffer; } + // SAFETY: // safe because the region returned is owned by self.memory_mapping unsafe { slice::from_raw_parts_mut(self.memory_mapping.as_ptr().add(offset), len) } } diff --git a/base/src/alloc.rs b/base/src/alloc.rs index 7c4bcc8211..a8c1d66e6d 100644 --- a/base/src/alloc.rs +++ b/base/src/alloc.rs @@ -37,6 +37,7 @@ use std::cmp::min; /// let layout = Layout::from_size_align(size, mem::align_of::
()).unwrap(); /// let mut allocation = LayoutAllocation::zeroed(layout); /// +/// // SAFETY: /// // Safe to obtain an exclusive reference because there are no other /// // references to the allocation yet and all-zero is a valid bit pattern for /// // our header. @@ -57,10 +58,9 @@ impl LayoutAllocation { /// incompatible with its type, for example an uninitialized bool or enum. pub fn uninitialized(layout: Layout) -> Self { let ptr = if layout.size() > 0 { - unsafe { - // Safe as long as we guarantee layout.size() > 0. - alloc(layout) - } + // SAFETY: + // Safe as long as we guarantee layout.size() > 0. + unsafe { alloc(layout) } } else { layout.align() as *mut u8 }; @@ -77,10 +77,9 @@ impl LayoutAllocation { /// one of the fields has type NonZeroUsize. pub fn zeroed(layout: Layout) -> Self { let ptr = if layout.size() > 0 { - unsafe { - // Safe as long as we guarantee layout.size() > 0. - alloc_zeroed(layout) - } + // SAFETY: + // Safe as long as we guarantee layout.size() > 0. + unsafe { alloc_zeroed(layout) } } else { layout.align() as *mut u8 }; @@ -159,8 +158,9 @@ impl LayoutAllocation { impl Drop for LayoutAllocation { fn drop(&mut self) { if self.layout.size() > 0 { + // SAFETY: + // Safe as long as we guarantee layout.size() > 0. unsafe { - // Safe as long as we guarantee layout.size() > 0. dealloc(self.ptr, self.layout); } } @@ -178,6 +178,8 @@ mod tests { fn test_as_slice_u32() { let layout = Layout::from_size_align(size_of::() * 15, align_of::()).unwrap(); let allocation = LayoutAllocation::zeroed(layout); + // SAFETY: + // Slice less than the allocation size, which will return a slice of only the requested length. 
let slice: &[u32] = unsafe { allocation.as_slice(15) }; assert_eq!(slice.len(), 15); assert_eq!(slice[0], 0); @@ -189,6 +191,7 @@ mod tests { let layout = Layout::from_size_align(size_of::() * 15, align_of::()).unwrap(); let allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Slice less than the allocation size, which will return a slice of only the requested length. let slice: &[u32] = unsafe { allocation.as_slice(5) }; assert_eq!(slice.len(), 5); @@ -199,6 +202,7 @@ mod tests { let layout = Layout::from_size_align(size_of::() * 15, align_of::()).unwrap(); let allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Slice more than the allocation size, which will clamp the returned slice len to the limit. let slice: &[u32] = unsafe { allocation.as_slice(100) }; assert_eq!(slice.len(), 15); @@ -210,6 +214,7 @@ mod tests { let layout = Layout::from_size_align(size_of::() * 15 + 2, align_of::()).unwrap(); let allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Slice as many u32s as possible, which should return a slice that only includes the full // u32s, not the trailing 2 bytes. let slice: &[u32] = unsafe { allocation.as_slice(100) }; diff --git a/base/src/descriptor.rs b/base/src/descriptor.rs index 87ec59a11b..222aa2dc63 100644 --- a/base/src/descriptor.rs +++ b/base/src/descriptor.rs @@ -111,6 +111,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor { /// TODO(b/191800567): this API has sharp edges on Windows. We should evaluate making some /// adjustments to smooth those edges. fn try_from(rd: &dyn AsRawDescriptor) -> std::result::Result { + // SAFETY: // Safe because the underlying raw descriptor is guaranteed valid by rd's existence. // // Note that we are cloning the underlying raw descriptor since we have no guarantee of @@ -129,6 +130,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor { impl From for SafeDescriptor { fn from(f: File) -> SafeDescriptor { + // SAFETY: // Safe because we own the File at this point. 
unsafe { SafeDescriptor::from_raw_descriptor(f.into_raw_descriptor()) } } diff --git a/base/src/descriptor_reflection.rs b/base/src/descriptor_reflection.rs index b62f62807d..27b9137ea3 100644 --- a/base/src/descriptor_reflection.rs +++ b/base/src/descriptor_reflection.rs @@ -398,7 +398,9 @@ pub mod with_as_descriptor { { super::deserialize_descriptor(de) .map(IntoRawDescriptor::into_raw_descriptor) - .map(|rd| unsafe { T::from_raw_descriptor(rd) }) + .map(|rd| + // SAFETY: rd is expected to be valid for the duration of the call. + unsafe { T::from_raw_descriptor(rd) }) } } @@ -462,9 +464,9 @@ mod tests { use super::super::SerializeDescriptors; fn deserialize(json: &str, descriptors: &[RawDescriptor]) -> T { - let safe_descriptors = descriptors - .iter() - .map(|&v| unsafe { SafeDescriptor::from_raw_descriptor(v) }); + let safe_descriptors = descriptors.iter().map(|&v| + // SAFETY: `descriptor` is expected to be valid. + unsafe { SafeDescriptor::from_raw_descriptor(v) }); deserialize_with_descriptors(|| serde_json::from_str(json), safe_descriptors).unwrap() } diff --git a/base/src/iobuf.rs b/base/src/iobuf.rs index 0323e859ec..b1a87783b1 100644 --- a/base/src/iobuf.rs +++ b/base/src/iobuf.rs @@ -35,6 +35,7 @@ pub struct IoBufMut<'a> { impl<'a> IoBufMut<'a> { pub fn new(buf: &mut [u8]) -> IoBufMut<'a> { + // SAFETY: // Safe because buf's memory is of the supplied length, and // guaranteed to exist for the lifetime of the returned value. unsafe { Self::from_raw_parts(buf.as_mut_ptr(), buf.len()) } @@ -74,6 +75,7 @@ impl<'a> IoBufMut<'a> { self.iobuf.set_len(self.len() - count); + // SAFETY: // Safe because we've checked that `count <= self.len()` so both the starting and resulting // pointer are within the bounds of the allocation. 
self.iobuf.set_ptr(unsafe { self.as_mut_ptr().add(count) }); @@ -114,6 +116,7 @@ impl<'a> IoBufMut<'a> { #[allow(clippy::wrong_self_convention)] #[inline] pub fn as_iobufs<'slice>(iovs: &'slice [IoBufMut<'_>]) -> &'slice [IoBuf] { + // SAFETY: // Safe because `IoBufMut` is ABI-compatible with `IoBuf`. unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBuf, iovs.len()) } } @@ -121,6 +124,7 @@ impl<'a> IoBufMut<'a> { /// Converts a mutable slice of `IoBufMut`s into a mutable slice of `IoBuf`s. #[inline] pub fn as_iobufs_mut<'slice>(iovs: &'slice mut [IoBufMut<'_>]) -> &'slice mut [IoBuf] { + // SAFETY: // Safe because `IoBufMut` is ABI-compatible with `IoBuf`. unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBuf, iovs.len()) } } @@ -138,11 +142,13 @@ impl<'a> AsMut for IoBufMut<'a> { } } +// SAFETY: // It's safe to implement Send + Sync for this type for the same reason that `std::io::IoSliceMut` // is Send + Sync. Internally, it contains a pointer and a length. The integer length is safely Send // + Sync. There's nothing wrong with sending a pointer between threads and de-referencing the // pointer requires an unsafe block anyway. See also https://github.com/rust-lang/rust/pull/70342. unsafe impl<'a> Send for IoBufMut<'a> {} +// SAFETY: See comments for impl Send unsafe impl<'a> Sync for IoBufMut<'a> {} impl<'a> Debug for IoBufMut<'a> { diff --git a/base/src/mmap.rs b/base/src/mmap.rs index b13c617215..890520eacf 100644 --- a/base/src/mmap.rs +++ b/base/src/mmap.rs @@ -135,6 +135,7 @@ impl MemoryMapping { match self.mapping.size().checked_sub(offset) { Some(size_past_offset) => { let bytes_copied = min(size_past_offset, buf.len()); + // SAFETY: // The bytes_copied equation above ensures we don't copy bytes out of range of // either buf or this slice. We also know that the buffers do not overlap because // slices can never occupy the same memory as a volatile slice. 
@@ -151,6 +152,7 @@ impl MemoryMapping { match self.size().checked_sub(offset) { Some(size_past_offset) => { let bytes_copied = min(size_past_offset, buf.len()); + // SAFETY: // The bytes_copied equation above ensures we don't copy bytes out of range of // either buf or this slice. We also know that the buffers do not overlap because // slices can never occupy the same memory as a volatile slice. @@ -182,6 +184,7 @@ impl MemoryMapping { /// ``` pub fn write_obj(&self, val: T, offset: usize) -> Result<()> { self.mapping.range_end(offset, size_of::())?; + // SAFETY: // This is safe because we checked the bounds above. unsafe { write_unaligned(self.as_ptr().add(offset) as *mut T, val); @@ -210,6 +213,7 @@ impl MemoryMapping { /// ``` pub fn read_obj(&self, offset: usize) -> Result { self.mapping.range_end(offset, size_of::())?; + // SAFETY: // This is safe because by definition Copy types can have their bits set arbitrarily and // still be valid. unsafe { @@ -242,6 +246,7 @@ impl MemoryMapping { // Make sure writes to memory have been committed before performing I/O that could // potentially depend on them. fence(Ordering::SeqCst); + // SAFETY: // This is safe because we checked the bounds above. unsafe { write_volatile(self.as_ptr().add(offset) as *mut T, val); @@ -273,6 +278,7 @@ impl MemoryMapping { /// ``` pub fn read_obj_volatile(&self, offset: usize) -> Result { self.mapping.range_end(offset, size_of::())?; + // SAFETY: // This is safe because by definition Copy types can have their bits set arbitrarily and // still be valid. unsafe { @@ -410,6 +416,7 @@ impl VolatileMemory for MemoryMapping { offset, })?; + // SAFETY: // Safe because we checked that offset + count was within our range and we only ever hand // out volatile accessors. 
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) }) @@ -422,6 +429,7 @@ impl VolatileMemory for MemoryMapping { /// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that /// can't be unmapped during the `MappedRegion`'s lifetime. pub unsafe trait MappedRegion: Send + Sync { + // SAFETY: /// Returns a pointer to the beginning of the memory region. Should only be /// used for passing this region to ioctls for setting guest memory. fn as_ptr(&self) -> *mut u8; @@ -456,6 +464,7 @@ pub unsafe trait MappedRegion: Send + Sync { } } +// SAFETY: // Safe because it exclusively forwards calls to a safe implementation. unsafe impl MappedRegion for MemoryMapping { fn as_ptr(&self) -> *mut u8 { @@ -473,6 +482,10 @@ pub struct ExternalMapping { pub size: usize, } +// SAFETY: +// `ptr`..`ptr+size` is an mmaped region and is owned by this object. Caller +// needs to ensure that the region is not unmapped during the `MappedRegion`'s +// lifetime. unsafe impl MappedRegion for ExternalMapping { /// used for passing this region to ioctls for setting guest memory. fn as_ptr(&self) -> *mut u8 { diff --git a/base/src/sys/linux/acpi_event.rs b/base/src/sys/linux/acpi_event.rs index 82da0b92da..514619d2f3 100644 --- a/base/src/sys/linux/acpi_event.rs +++ b/base/src/sys/linux/acpi_event.rs @@ -84,7 +84,9 @@ impl AcpiNotifyEvent { // https://github.com/rust-lang/rust/issues/79089, // before using device_class further cast it to u8. 
let device_class: &[u8; 20usize] = + // SAFETY: trivially safe unsafe { ::std::mem::transmute(&acpi_event.device_class) }; + // SAFETY: trivially safe let bus_id: &[u8; 15usize] = unsafe { ::std::mem::transmute(&acpi_event.bus_id) }; Ok(AcpiNotifyEvent { diff --git a/base/src/sys/linux/capabilities.rs b/base/src/sys/linux/capabilities.rs index c6518dfd90..66fdc821cd 100644 --- a/base/src/sys/linux/capabilities.rs +++ b/base/src/sys/linux/capabilities.rs @@ -20,9 +20,10 @@ extern "C" { /// Drops all capabilities (permitted, inheritable, and effective) from the current process. pub fn drop_capabilities() -> Result<()> { + // SAFETY: + // Safe because we do not actually manipulate any memory handled by libcap + // and we check errors. unsafe { - // Safe because we do not actually manipulate any memory handled by libcap - // and we check errors. let caps = cap_init(); if caps.is_null() { return errno_result(); diff --git a/base/src/sys/linux/descriptor.rs b/base/src/sys/linux/descriptor.rs index bb4f79a7bd..4bfe95393a 100644 --- a/base/src/sys/linux/descriptor.rs +++ b/base/src/sys/linux/descriptor.rs @@ -13,8 +13,10 @@ impl PartialEq for SafeDescriptor { return true; } + // SAFETY: // safe because we only use the return value and libc says it's always successful let pid = unsafe { libc::getpid() }; + // SAFETY: // safe because we are passing everything by value and checking the return value let ret = unsafe { libc::syscall( diff --git a/base/src/sys/linux/event.rs b/base/src/sys/linux/event.rs index f83f561a7d..1a1c033154 100644 --- a/base/src/sys/linux/event.rs +++ b/base/src/sys/linux/event.rs @@ -55,21 +55,24 @@ impl EventExt for crate::Event { impl PlatformEvent { /// Creates a new blocking eventfd with an initial value of 0. pub fn new() -> Result { + // SAFETY: // This is safe because eventfd merely allocated an eventfd for our process and we handle // the error case. 
let ret = unsafe { eventfd(0, 0) }; if ret < 0 { return errno_result(); } - // This is safe because we checked ret for success and know the kernel gave us an fd that we - // own. Ok(PlatformEvent { + // SAFETY: + // This is safe because we checked ret for success and know the kernel gave us an fd that we + // own. event_handle: unsafe { SafeDescriptor::from_raw_descriptor(ret) }, }) } /// See `EventExt::write_count`. pub fn write_count(&self, v: u64) -> Result<()> { + // SAFETY: // This is safe because we made this fd and the pointer we pass can not overflow because we // give the syscall's size parameter properly. let ret = unsafe { @@ -88,9 +91,10 @@ impl PlatformEvent { /// See `EventExt::read_count`. pub fn read_count(&self) -> Result { let mut buf: u64 = 0; + // SAFETY: + // This is safe because we made this fd and the pointer we pass can not overflow because + // we give the syscall's size parameter properly. let ret = unsafe { - // This is safe because we made this fd and the pointer we pass can not overflow because - // we give the syscall's size parameter properly. read( self.as_raw_descriptor(), &mut buf as *mut u64 as *mut c_void, @@ -121,6 +125,7 @@ impl PlatformEvent { revents: 0, }; let timeoutspec: libc::timespec = duration_to_timespec(timeout); + // SAFETY: // Safe because this only modifies |pfd| and we check the return value let ret = unsafe { libc::ppoll( diff --git a/base/src/sys/linux/file.rs b/base/src/sys/linux/file.rs index 39b898d3d0..533c98dc5b 100644 --- a/base/src/sys/linux/file.rs +++ b/base/src/sys/linux/file.rs @@ -21,6 +21,7 @@ fn lseek(fd: &dyn AsRawDescriptor, offset: u64, option: LseekOption) -> Result libc::SEEK_DATA, LseekOption::Hole => libc::SEEK_HOLE, }; + // SAFETY: // safe because this doesn't modify any memory. 
let ret = unsafe { libc::lseek64(fd.as_raw_descriptor(), offset as i64, whence) }; if ret < 0 { diff --git a/base/src/sys/linux/file_flags.rs b/base/src/sys/linux/file_flags.rs index acbb4df3cd..e689492eb4 100644 --- a/base/src/sys/linux/file_flags.rs +++ b/base/src/sys/linux/file_flags.rs @@ -24,6 +24,7 @@ pub enum FileFlags { impl FileFlags { pub fn from_file(file: &dyn AsRawDescriptor) -> Result { + // SAFETY: // Trivially safe because fcntl with the F_GETFL command is totally safe and we check for // error. let flags = unsafe { fcntl(file.as_raw_descriptor(), F_GETFL) }; diff --git a/base/src/sys/linux/get_filesystem_type.rs b/base/src/sys/linux/get_filesystem_type.rs index c3cea6fd81..09bc2910e1 100644 --- a/base/src/sys/linux/get_filesystem_type.rs +++ b/base/src/sys/linux/get_filesystem_type.rs @@ -15,9 +15,11 @@ use crate::syscall; #[allow(clippy::unnecessary_cast)] pub fn get_filesystem_type(file: &File) -> Result { let mut statfs_buf = MaybeUninit::::uninit(); + // SAFETY: // Safe because we just got the memory space with exact required amount and // passing that on. syscall!(unsafe { fstatfs64(file.as_raw_fd(), statfs_buf.as_mut_ptr()) })?; + // SAFETY: // Safe because the kernel guarantees the struct is initialized. let statfs_buf = unsafe { statfs_buf.assume_init() }; Ok(statfs_buf.f_type as i64) diff --git a/base/src/sys/linux/linux/syslog.rs b/base/src/sys/linux/linux/syslog.rs index ab2ef14bcb..a6b9b840bf 100644 --- a/base/src/sys/linux/linux/syslog.rs +++ b/base/src/sys/linux/linux/syslog.rs @@ -120,6 +120,7 @@ impl Syslog for PlatformSyslog { // libraries in use that hard depend on libc's syslogger. Remove this and go back to making the // connection directly once minjail is ready. fn openlog_and_get_socket() -> Result { + // SAFETY: // closelog first in case there was already a file descriptor open. Safe because it takes no // arguments and just closes an open file descriptor. Does nothing if the file descriptor // was not already open. 
@@ -137,6 +138,7 @@ fn openlog_and_get_socket() -> Result { .map_err(Error::GetLowestFd)? .as_raw_fd(); + // SAFETY: See comments for each unsafe line in the block. unsafe { // Safe because openlog accesses no pointers because `ident` is null, only valid flags are // used, and it returns no error. @@ -152,6 +154,7 @@ fn openlog_and_get_socket() -> Result { } fn get_localtime() -> tm { + // SAFETY: See comments for each unsafe line in the block. unsafe { // Safe because tm is just a struct of plain data. let mut tm: tm = mem::zeroed(); diff --git a/base/src/sys/linux/mmap.rs b/base/src/sys/linux/mmap.rs index 6533c4e00b..94be3db64b 100644 --- a/base/src/sys/linux/mmap.rs +++ b/base/src/sys/linux/mmap.rs @@ -60,6 +60,7 @@ impl dyn MappedRegion { pub fn msync(&self, offset: usize, size: usize) -> Result<()> { validate_includes_range(self.size(), offset, size)?; + // SAFETY: // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size // are correct, and we've validated that `offset`..`offset+size` is in the range owned by // this `MappedRegion`. @@ -86,11 +87,13 @@ pub struct MemoryMapping { size: usize, } +// SAFETY: // Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MemoryMapping {} +// SAFETY: See safety comments for impl Send unsafe impl Sync for MemoryMapping {} impl MemoryMapping { @@ -108,6 +111,7 @@ impl MemoryMapping { /// * `size` - Size of memory region in bytes. /// * `prot` - Protection (e.g. readable/writable) of the memory region. pub fn new_protection(size: usize, prot: Protection) -> Result { + // SAFETY: // This is safe because we are creating an anonymous mapping in a place not already used by // any other area in this process. 
unsafe { MemoryMapping::try_mmap(None, size, prot.into(), None) } @@ -161,9 +165,10 @@ impl MemoryMapping { prot: Protection, populate: bool, ) -> Result { + // SAFETY: + // This is safe because we are creating an anonymous mapping in a place not already used + // by any other area in this process. unsafe { - // This is safe because we are creating an anonymous mapping in a place not already used - // by any other area in this process. MemoryMapping::try_mmap_populate(None, size, prot.into(), Some((fd, offset)), populate) } } @@ -256,6 +261,8 @@ impl MemoryMapping { } // Map private for read-only seal. See below for upstream relax of the restriction. // - https://lore.kernel.org/bpf/20231013103208.kdffpyerufr4ygnw@quack3/T/ + // SAFETY: + // Safe because no third parameter is expected and we check the return result. let seals = unsafe { libc::fcntl(fd.as_raw_descriptor(), libc::F_GET_SEALS) }; if (seals >= 0) && (seals & libc::F_SEAL_WRITE != 0) { flags &= !libc::MAP_SHARED; @@ -288,6 +295,7 @@ impl MemoryMapping { /// Madvise the kernel to unmap on fork. pub fn use_dontfork(&self) -> Result<()> { + // SAFETY: // This is safe because we call madvise with a valid address and size, and we check the // return value. let ret = unsafe { @@ -314,6 +322,7 @@ impl MemoryMapping { return Ok(()); } + // SAFETY: // This is safe because we call madvise with a valid address and size, and we check the // return value. let ret = unsafe { @@ -332,6 +341,7 @@ impl MemoryMapping { /// Calls msync with MS_SYNC on the mapping. pub fn msync(&self) -> Result<()> { + // SAFETY: // This is safe since we use the exact address and length of a known // good memory mapping. 
let ret = unsafe { @@ -352,6 +362,8 @@ impl MemoryMapping { pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> { self.range_end(mem_offset, count) .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?; + // SAFETY: Safe because all the args to madvise are valid and the return + // value is checked. let ret = unsafe { // madvising away the region is the same as the guest changing it. // Next time it is read, it may return zero pages. @@ -384,6 +396,7 @@ impl MemoryMapping { // Validation self.range_end(mem_offset, count) .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?; + // SAFETY: // Safe because populating the pages from the backed file does not affect the Rust memory // safety. let ret = unsafe { @@ -418,6 +431,7 @@ impl MemoryMapping { // Validation self.range_end(mem_offset, count) .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?; + // SAFETY: // Safe because dropping the page cache does not affect the Rust memory safety. let ret = unsafe { libc::madvise( @@ -448,6 +462,7 @@ impl MemoryMapping { self.range_end(mem_offset, count) .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?; let addr = self.addr as usize + mem_offset; + // SAFETY: // Safe because MLOCK_ONFAULT only affects the swap behavior of the kernel, so it has no // impact on rust semantics. let ret = unsafe { libc::mlock2(addr as *mut _, count, libc::MLOCK_ONFAULT) }; @@ -479,6 +494,7 @@ impl MemoryMapping { // Validation self.range_end(mem_offset, count) .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?; + // SAFETY: // Safe because munlock(2) does not affect the Rust memory safety. let ret = unsafe { libc::munlock((self.addr as usize + mem_offset) as *mut _, count) }; if ret < 0 { @@ -498,6 +514,7 @@ impl MemoryMapping { } } +// SAFETY: // Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't // be unmapped until it's Dropped. 
unsafe impl MappedRegion for MemoryMapping { @@ -512,6 +529,7 @@ unsafe impl MappedRegion for MemoryMapping { impl Drop for MemoryMapping { fn drop(&mut self) { + // SAFETY: // This is safe because we mmap the area at addr ourselves, and nobody // else is holding a reference to it. unsafe { @@ -527,11 +545,13 @@ pub struct MemoryMappingArena { size: usize, } +// SAFETY: // Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MemoryMappingArena {} +// SAFETY: See safety comments for impl Send unsafe impl Sync for MemoryMappingArena {} impl MemoryMappingArena { @@ -635,6 +655,7 @@ impl MemoryMappingArena { } validate_includes_range(self.size(), offset, size)?; + // SAFETY: // This is safe since the range has been validated. let mmap = unsafe { match fd { @@ -665,6 +686,7 @@ impl MemoryMappingArena { } } +// SAFETY: // Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that // won't be unmapped until it's Dropped. unsafe impl MappedRegion for MemoryMappingArena { @@ -712,6 +734,7 @@ impl From for MemoryMappingArena { impl Drop for MemoryMappingArena { fn drop(&mut self) { + // SAFETY: // This is safe because we own this memory range, and nobody else is holding a reference to // it. unsafe { @@ -902,6 +925,7 @@ mod tests { fn slice_addr() { let m = MemoryMappingBuilder::new(5).build().unwrap(); let s = m.get_slice(2, 3).unwrap(); + // SAFETY: all addresses are known to exist. assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) }); } diff --git a/base/src/sys/linux/mod.rs b/base/src/sys/linux/mod.rs index 18f0bde2ab..e6f7755b4f 100644 --- a/base/src/sys/linux/mod.rs +++ b/base/src/sys/linux/mod.rs @@ -124,6 +124,7 @@ pub type Mode = libc::mode_t; /// elsewhere. 
#[inline(always)] pub fn getpid() -> Pid { + // SAFETY: // Safe because this syscall can never fail and we give it a valid syscall number. unsafe { syscall(SYS_getpid as c_long) as Pid } } @@ -131,12 +132,14 @@ pub fn getpid() -> Pid { /// Safe wrapper for the geppid Linux systemcall. #[inline(always)] pub fn getppid() -> Pid { + // SAFETY: // Safe because this syscall can never fail and we give it a valid syscall number. unsafe { syscall(SYS_getppid as c_long) as Pid } } /// Safe wrapper for the gettid Linux systemcall. pub fn gettid() -> Pid { + // SAFETY: // Calling the gettid() sycall is always safe. unsafe { syscall(SYS_gettid as c_long) as Pid } } @@ -144,6 +147,7 @@ pub fn gettid() -> Pid { /// Safe wrapper for `geteuid(2)`. #[inline(always)] pub fn geteuid() -> Uid { + // SAFETY: // trivially safe unsafe { libc::geteuid() } } @@ -151,6 +155,7 @@ pub fn geteuid() -> Uid { /// Safe wrapper for `getegid(2)`. #[inline(always)] pub fn getegid() -> Gid { + // SAFETY: // trivially safe unsafe { libc::getegid() } } @@ -176,6 +181,7 @@ pub fn flock(file: &F, op: FlockOperation, nonblocking: bool operation |= libc::LOCK_NB; } + // SAFETY: // Safe since we pass in a valid fd and flock operation, and check the return value. syscall!(unsafe { libc::flock(file.as_raw_descriptor(), operation) }).map(|_| ()) } @@ -222,6 +228,7 @@ pub fn fallocate( len as libc::off64_t }; + // SAFETY: // Safe since we pass in a valid fd and fallocate mode, validate offset and len, // and check the return value. syscall!(unsafe { libc::fallocate64(file.as_raw_descriptor(), mode.into(), offset, len) }) @@ -232,10 +239,12 @@ pub fn fallocate( pub fn fstat(f: &F) -> Result { let mut st = MaybeUninit::::zeroed(); + // SAFETY: // Safe because the kernel will only write data in `st` and we check the return // value. syscall!(unsafe { libc::fstat64(f.as_raw_descriptor(), st.as_mut_ptr()) })?; + // SAFETY: // Safe because the kernel guarantees that the struct is now fully initialized. 
Ok(unsafe { st.assume_init() }) } @@ -252,7 +261,7 @@ ioctl_io_nr!(BLKDISCARD, BLOCK_IO_TYPE, 119); /// Discards the given range of a block file. pub fn discard_block(file: &F, offset: u64, len: u64) -> Result<()> { let range: [u64; 2] = [offset, len]; - // # Safety + // SAFETY: // Safe because // - we check the return value. // - ioctl(BLKDISCARD) does not hold the descriptor after the call. @@ -287,6 +296,7 @@ impl AsRawPid for std::process::Child { pub fn wait_for_pid(pid: A, options: c_int) -> Result<(Option, ExitStatus)> { let pid = pid.as_raw_pid(); let mut status: c_int = 1; + // SAFETY: // Safe because status is owned and the error is checked. let ret = unsafe { libc::waitpid(pid, &mut status, options) }; if ret < 0 { @@ -324,6 +334,7 @@ pub fn wait_for_pid(pid: A, options: c_int) -> Result<(Option, /// } /// ``` pub fn reap_child() -> Result { + // SAFETY: // Safe because we pass in no memory, prevent blocking with WNOHANG, and check for error. let ret = unsafe { waitpid(-1, ptr::null_mut(), WNOHANG) }; if ret == -1 { @@ -338,6 +349,7 @@ pub fn reap_child() -> Result { /// On success, this kills all processes in the current process group, including the current /// process, meaning this will not return. This is equivalent to a call to `kill(0, SIGKILL)`. pub fn kill_process_group() -> Result<()> { + // SAFETY: Safe because pid is 'self group' and return value doesn't matter. unsafe { kill(0, SIGKILL) }?; // Kill succeeded, so this process never reaches here. unreachable!(); @@ -349,12 +361,14 @@ pub fn kill_process_group() -> Result<()> { pub fn pipe(close_on_exec: bool) -> Result<(File, File)> { let flags = if close_on_exec { O_CLOEXEC } else { 0 }; let mut pipe_fds = [-1; 2]; + // SAFETY: // Safe because pipe2 will only write 2 element array of i32 to the given pointer, and we check // for error. 
let ret = unsafe { pipe2(&mut pipe_fds[0], flags) }; if ret == -1 { errno_result() } else { + // SAFETY: // Safe because both fds must be valid for pipe2 to have returned sucessfully and we have // exclusive ownership of them. Ok(unsafe { @@ -370,6 +384,7 @@ pub fn pipe(close_on_exec: bool) -> Result<(File, File)> { /// /// Returns the new size of the pipe or an error if the OS fails to set the pipe size. pub fn set_pipe_size(fd: RawFd, size: usize) -> Result { + // SAFETY: // Safe because fcntl with the `F_SETPIPE_SZ` arg doesn't touch memory. syscall!(unsafe { fcntl(fd, libc::F_SETPIPE_SZ, size as c_int) }).map(|ret| ret as usize) } @@ -450,12 +465,14 @@ pub fn validate_raw_descriptor(raw_descriptor: RawDescriptor) -> Result Result { // Checking that close-on-exec isn't set helps filter out FDs that were opened by // crosvm as all crosvm FDs are close on exec. + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let flags = unsafe { libc::fcntl(raw_fd, libc::F_GETFD) }; if flags < 0 || (flags & libc::FD_CLOEXEC) != 0 { return Err(Error::new(libc::EBADF)); } + // SAFETY: // Duplicate the fd to ensure that we don't accidentally close an fd previously // opened by another subsystem. Safe because this doesn't modify any memory and // we check the return value. @@ -476,6 +493,7 @@ pub fn poll_in(fd: &F) -> bool { events: libc::POLLIN, revents: 0, }; + // SAFETY: // Safe because we give a valid pointer to a list (of 1) FD and check the return value. let ret = unsafe { libc::poll(&mut fds, 1, 0) }; // An error probably indicates an invalid FD, or an FD that can't be polled. Returning false in @@ -515,6 +533,7 @@ pub fn safe_descriptor_from_path>(path: P) -> Result>(path: P, options: &OpenOptions) -> pub fn max_open_files() -> Result { let mut buf = mem::MaybeUninit::::zeroed(); + // SAFETY: // Safe because this will only modify `buf` and we check the return value. 
let res = unsafe { libc::prlimit64(0, libc::RLIMIT_NOFILE, ptr::null(), buf.as_mut_ptr()) }; if res == 0 { + // SAFETY: // Safe because the kernel guarantees that the struct is fully initialized. let limit = unsafe { buf.assume_init() }; Ok(limit.rlim_max) @@ -624,6 +645,7 @@ impl sched_attr { } pub fn sched_setattr(pid: Pid, attr: &mut sched_attr, flags: u32) -> Result<()> { + // SAFETY: Safe because all the args are valid and the return value is checked. let ret = unsafe { libc::syscall( libc::SYS_sched_setattr, diff --git a/base/src/sys/linux/net.rs b/base/src/sys/linux/net.rs index 61ae249699..2ba93ac57e 100644 --- a/base/src/sys/linux/net.rs +++ b/base/src/sys/linux/net.rs @@ -143,6 +143,7 @@ impl UnixSeqpacketListener { /// /// The returned socket has the close-on-exec flag set. pub fn accept(&self) -> io::Result { + // SAFETY: // Safe because we own this fd and the kernel will not write to null pointers. match unsafe { libc::accept4(
let sock = unsafe { let fd = libc::socket( @@ -193,12 +194,14 @@ impl NetlinkGenericSocket { SafeDescriptor::from_raw_descriptor(fd) }; + // SAFETY: // This MaybeUninit dance is needed because sockaddr_nl has a private padding field and // doesn't implement Default. Safe because all 0s is valid data for sockaddr_nl. let mut sa = unsafe { MaybeUninit::::zeroed().assume_init() }; sa.nl_family = libc::AF_NETLINK as libc::sa_family_t; sa.nl_groups = nl_groups; + // SAFETY: // Safe because we pass a descriptor that we own and valid pointer/size for sockaddr. unsafe { let res = libc::bind( @@ -223,6 +226,7 @@ impl NetlinkGenericSocket { .map_err(|_| Error::new(EINVAL))?; let allocation = LayoutAllocation::uninitialized(layout); + // SAFETY: // Safe because we pass a valid, owned socket fd and a valid pointer/size for the buffer. let bytes_read = unsafe { let res = libc::recv(self.sock.as_raw_fd(), allocation.as_ptr(), buf_size, 0); @@ -252,6 +256,7 @@ impl NetlinkGenericSocket { .unwrap(); let mut allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Safe because the data in allocation was initialized up to `buf_size` and is // sufficiently aligned. let data = unsafe { allocation.as_mut_slice(buf_size) }; @@ -288,6 +293,7 @@ impl NetlinkGenericSocket { let payload_end = payload_start + family_name.len(); data[payload_start..payload_end].copy_from_slice(family_name.as_bytes()); + // SAFETY: // Safe because we pass a valid, owned socket fd and a valid pointer/size for the buffer. unsafe { let res = libc::send( @@ -430,6 +436,7 @@ pub struct NetlinkGenericRead { impl NetlinkGenericRead { pub fn iter(&self) -> NetlinkMessageIter { + // SAFETY: // Safe because the data in allocation was initialized up to `self.len` by `recv()` and is // sufficiently aligned. 
let data = unsafe { &self.allocation.as_slice(self.len) }; diff --git a/base/src/sys/linux/poll.rs b/base/src/sys/linux/poll.rs index 30e0afed75..41011f2626 100644 --- a/base/src/sys/linux/poll.rs +++ b/base/src/sys/linux/poll.rs @@ -61,12 +61,15 @@ pub struct EventContext { impl EventContext { /// Creates a new `EventContext`. pub fn new() -> Result> { + // SAFETY: // Safe because we check the return value. let epoll_fd = unsafe { epoll_create1(EPOLL_CLOEXEC) }; if epoll_fd < 0 { return errno_result(); } Ok(EventContext { + // SAFETY: + // Safe because epoll_fd is valid. epoll_ctx: unsafe { File::from_raw_descriptor(epoll_fd) }, tokens: PhantomData, }) @@ -122,6 +125,7 @@ impl EventContext { events: event_type.into(), u64: token.as_raw_token(), }; + // SAFETY: // Safe because we give a valid epoll FD and FD to watch, as well as a valid epoll_event // structure. Then we check the return value. let ret = unsafe { @@ -145,6 +149,7 @@ impl EventContext { events: event_type.into(), u64: token.as_raw_token(), }; + // SAFETY: // Safe because we give a valid epoll FD and FD to modify, as well as a valid epoll_event // structure. Then we check the return value. let ret = unsafe { @@ -169,6 +174,7 @@ impl EventContext { /// Failure to do so will cause the `wait` method to always return immediately, causing ~100% /// CPU load. pub fn delete(&self, fd: &dyn AsRawDescriptor) -> Result<()> { + // SAFETY: // Safe because we give a valid epoll FD and FD to stop watching. Then we check the return // value. let ret = unsafe { @@ -203,12 +209,12 @@ impl EventContext { /// This may return earlier than `timeout` with zero events if the duration indicated exceeds /// system limits. pub fn wait_timeout(&self, timeout: Duration) -> Result; 16]>> { - // SAFETY: - // `MaybeUnint` has the same layout as plain `T` (`epoll_event` in our case). 
- // We submit an uninitialized array to the `epoll_wait` system call, which returns how many - // elements it initialized, and then we convert only the initialized `MaybeUnint` values - // into `epoll_event` structures after the call. let mut epoll_events: [MaybeUninit; EVENT_CONTEXT_MAX_EVENTS] = + // SAFETY: + // `MaybeUninit` has the same layout as plain `T` (`epoll_event` in our case). + // We submit an uninitialized array to the `epoll_wait` system call, which returns how many + // elements it initialized, and then we convert only the initialized `MaybeUninit` values + // into `epoll_event` structures after the call. unsafe { MaybeUninit::uninit().assume_init() }; let timeout_millis = if timeout.as_secs() as i64 == i64::max_value() { @@ -227,6 +233,7 @@ impl EventContext { }; let ret = { let max_events = epoll_events.len() as c_int; + // SAFETY: // Safe because we give an epoll context and a properly sized epoll_events array // pointer, which we trust the kernel to fill in properly. The `transmute` is safe, // since `MaybeUnint` has the same layout as `T`, and the `epoll_wait` syscall will diff --git a/base/src/sys/linux/priority.rs b/base/src/sys/linux/priority.rs index d139d5e8d1..4ad86661f3 100644 --- a/base/src/sys/linux/priority.rs +++ b/base/src/sys/linux/priority.rs @@ -13,6 +13,7 @@ pub fn set_rt_prio_limit(limit: u64) -> Result<()> { rlim_cur: limit, rlim_max: limit, }; + // SAFETY: // Safe because the kernel doesn't modify memory that is accessible to the process here. let res = unsafe { libc::setrlimit64(libc::RLIMIT_RTPRIO, &rt_limit_arg) }; @@ -25,13 +26,15 @@ pub fn set_rt_prio_limit(limit: u64) -> Result<()> { /// Sets the current thread to be scheduled using the round robin real time class with `priority`. pub fn set_rt_round_robin(priority: i32) -> Result<()> { + // SAFETY: // Safe because sched_param only contains primitive types for which zero // initialization is valid.
let mut sched_param: libc::sched_param = unsafe { MaybeUninit::zeroed().assume_init() }; sched_param.sched_priority = priority; - // Safe because the kernel doesn't modify memory that is accessible to the process here. let res = + // SAFETY: + // Safe because the kernel doesn't modify memory that is accessible to the process here. unsafe { libc::pthread_setschedparam(libc::pthread_self(), libc::SCHED_RR, &sched_param) }; if res != 0 { diff --git a/base/src/sys/linux/process.rs b/base/src/sys/linux/process.rs index be0a728a89..db7ae6165d 100644 --- a/base/src/sys/linux/process.rs +++ b/base/src/sys/linux/process.rs @@ -98,6 +98,7 @@ where let tz = std::env::var("TZ").unwrap_or_default(); + // SAFETY: // Safe because the program is still single threaded. // We own the jail object and nobody else will try to reuse it. let pid = match unsafe { jail.fork(Some(&keep_rds)) }? { @@ -119,6 +120,7 @@ where [..std::cmp::min(MAX_THREAD_LABEL_LEN, debug_label.len())]; match CString::new(debug_label_trimmed) { Ok(thread_name) => { + // SAFETY: // Safe because thread_name is a valid pointer and setting name of this // thread should be safe. let _ = unsafe { @@ -150,6 +152,7 @@ where None => "process.rs: no debug label".to_owned(), }, // Can't use safe wrapper because jail crate depends on base + // SAFETY: // Safe because it's only doing a read within bound checked by static assert unsafe {*(&jail as *const Minijail as *const usize)} ); diff --git a/base/src/sys/linux/sched.rs b/base/src/sys/linux/sched.rs index f95fcae7de..054faf6b83 100644 --- a/base/src/sys/linux/sched.rs +++ b/base/src/sys/linux/sched.rs @@ -26,8 +26,10 @@ struct CpuSet(cpu_set_t); impl CpuSet { pub fn new() -> CpuSet { + // SAFETY: // cpu_set_t is a C struct and can be safely initialized with zeroed memory. let mut cpuset: cpu_set_t = unsafe { mem::MaybeUninit::zeroed().assume_init() }; + // SAFETY: // Safe because we pass a valid cpuset pointer. 
unsafe { CPU_ZERO(&mut cpuset) }; CpuSet(cpuset) @@ -36,6 +38,7 @@ impl CpuSet { pub fn to_cpus(&self) -> Vec { let mut cpus = Vec::new(); for i in 0..(CPU_SETSIZE as usize) { + // SAFETY: Safe because `i` and `self.0` are valid. if unsafe { CPU_ISSET(i, &self.0) } { cpus.push(i); } @@ -48,6 +51,7 @@ impl FromIterator for CpuSet { fn from_iter>(cpus: I) -> Self { let mut cpuset = CpuSet::new(); for cpu in cpus { + // SAFETY: // Safe because we pass a valid cpu index and cpuset.0 is a valid pointer. unsafe { CPU_SET(cpu, &mut cpuset.0) }; } @@ -78,6 +82,7 @@ pub fn set_cpu_affinity>(cpus: I) -> Result<()> { }) .collect::>()?; + // SAFETY: // Safe because we pass 0 for the current thread, and cpuset is a valid pointer and only // used for the duration of this call. crate::syscall!(unsafe { sched_setaffinity(0, mem::size_of_val(&cpuset), &cpuset) })?; @@ -88,6 +93,7 @@ pub fn set_cpu_affinity>(cpus: I) -> Result<()> { pub fn get_cpu_affinity() -> Result> { let mut cpu_set = CpuSet::new(); + // SAFETY: // Safe because we pass 0 for the current thread, and cpu_set.0 is a valid pointer and only // used for the duration of this call. crate::syscall!(unsafe { sched_getaffinity(0, mem::size_of_val(&cpu_set.0), &mut cpu_set.0) })?; @@ -115,6 +121,7 @@ pub fn enable_core_scheduling() -> Result<()> { PIDTYPE_PGID, } + // SAFETY: Safe because we check the return value to prctl. let ret = unsafe { prctl( PR_SCHED_CORE, diff --git a/base/src/sys/linux/shm.rs b/base/src/sys/linux/shm.rs index 0c34457c0d..98b6fce8a9 100644 --- a/base/src/sys/linux/shm.rs +++ b/base/src/sys/linux/shm.rs @@ -40,6 +40,8 @@ use crate::SharedMemory; const MFD_CLOEXEC: c_uint = 0x0001; const MFD_NOEXEC_SEAL: c_uint = 0x0008; +// SAFETY: It is caller's responsibility to ensure the args are valid and check the +// return value of the function. 
unsafe fn memfd_create(name: *const c_char, flags: c_uint) -> c_int { syscall(SYS_memfd_create as c_long, name, flags) as c_int } @@ -165,15 +167,19 @@ impl PlatformSharedMemory for SharedMemory { } let shm_name = debug_name.as_ptr() as *const c_char; + // SAFETY: // The following are safe because we give a valid C string and check the // results of the memfd_create call. let fd = unsafe { memfd_create(shm_name, flags) }; if fd < 0 { return errno_result(); } + // SAFETY: Safe because fd is valid. let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; // Set the size of the memfd. + // SAFETY: Safe because we check the return value of ftruncate64 and all the args to the + // function are valid. let ret = unsafe { ftruncate64(descriptor.as_raw_descriptor(), size as off64_t) }; if ret < 0 { return errno_result(); @@ -219,6 +225,8 @@ impl SharedMemoryLinux for SharedMemory { } fn get_seals(&self) -> Result { + // SAFETY: Safe because we check the return value of fcntl and all the args to the + // function are valid. let ret = unsafe { fcntl(self.descriptor.as_raw_descriptor(), F_GET_SEALS) }; if ret < 0 { return errno_result(); @@ -227,6 +235,8 @@ impl SharedMemoryLinux for SharedMemory { } fn add_seals(&mut self, seals: MemfdSeals) -> Result<()> { + // SAFETY: Safe because we check the return value of fcntl and all the args to the + // function are valid. let ret = unsafe { fcntl(self.descriptor.as_raw_descriptor(), F_ADD_SEALS, seals) }; if ret < 0 { return errno_result(); diff --git a/base/src/sys/linux/signal.rs b/base/src/sys/linux/signal.rs index 6524d11ffa..4ef4334df5 100644 --- a/base/src/sys/linux/signal.rs +++ b/base/src/sys/linux/signal.rs @@ -277,12 +277,14 @@ extern "C" { /// Returns the minimum (inclusive) real-time signal number. #[allow(non_snake_case)] pub fn SIGRTMIN() -> c_int { + // SAFETY: trivially safe unsafe { __libc_current_sigrtmin() } } /// Returns the maximum (inclusive) real-time signal number.
#[allow(non_snake_case)] pub fn SIGRTMAX() -> c_int { + // SAFETY: trivially safe unsafe { __libc_current_sigrtmax() } } @@ -311,11 +313,13 @@ pub unsafe fn register_signal_handler(num: c_int, handler: extern "C" fn(c_int)) /// Resets the signal handler of signum `num` back to the default. pub fn clear_signal_handler(num: c_int) -> Result<()> { + // SAFETY: // Safe because sigaction is owned and expected to be initialized ot zeros. let mut sigact: sigaction = unsafe { mem::zeroed() }; sigact.sa_flags = SA_RESTART; sigact.sa_sigaction = SIG_DFL; + // SAFETY: // Safe because sigact is owned, and this is restoring the default signal handler. let ret = unsafe { sigaction(num, &sigact, null_mut()) }; if ret < 0 { @@ -345,9 +349,11 @@ pub unsafe fn register_rt_signal_handler(num: c_int, handler: extern "C" fn(c_in /// /// This is a helper function used when we want to manipulate signals. pub fn create_sigset(signals: &[c_int]) -> Result { + // SAFETY: // sigset will actually be initialized by sigemptyset below. let mut sigset: sigset_t = unsafe { mem::zeroed() }; + // SAFETY: // Safe - return value is checked. let ret = unsafe { sigemptyset(&mut sigset) }; if ret < 0 { @@ -355,6 +361,7 @@ pub fn create_sigset(signals: &[c_int]) -> Result { } for signal in signals { + // SAFETY: // Safe - return value is checked. let ret = unsafe { sigaddset(&mut sigset, *signal) }; if ret < 0 { @@ -373,6 +380,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option) -> Result { let ts = duration_to_timespec(timeout); + // SAFETY: // Safe - return value is checked. let ret = handle_eintr_errno!(unsafe { sigtimedwait(&sigset, null_mut(), &ts) }); if ret < 0 { @@ -383,6 +391,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option) -> Result { let mut ret: c_int = 0; + // SAFETY: Safe because args are valid and the return value is checked. 
let err = handle_eintr_rc!(unsafe { sigwait(&sigset, &mut ret as *mut c_int) }); if err != 0 { Err(ErrnoError::new(err)) @@ -397,6 +406,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option) -> Result SignalResult> { let mut mask = Vec::new(); + // SAFETY: // Safe - return values are checked. unsafe { let mut old_sigset: sigset_t = mem::zeroed(); @@ -422,6 +432,7 @@ pub fn get_blocked_signals() -> SignalResult> { pub fn block_signal(num: c_int) -> SignalResult<()> { let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?; + // SAFETY: // Safe - return values are checked. unsafe { let mut old_sigset: sigset_t = mem::zeroed(); @@ -447,6 +458,7 @@ pub fn block_signal(num: c_int) -> SignalResult<()> { pub fn unblock_signal(num: c_int) -> SignalResult<()> { let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?; + // SAFETY: // Safe - return value is checked. let ret = unsafe { pthread_sigmask(SIG_UNBLOCK, &sigset, null_mut()) }; if ret < 0 { @@ -460,6 +472,7 @@ pub fn clear_signal(num: c_int) -> SignalResult<()> { let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?; while { + // SAFETY: // This is safe as we are rigorously checking return values // of libc calls. unsafe { @@ -535,6 +548,7 @@ pub unsafe trait Killable { return Err(ErrnoError::new(EINVAL)); } + // SAFETY: // Safe because we ensure we are using a valid pthread handle, a valid signal number, and // check the return result. let ret = unsafe { pthread_kill(self.pthread_handle(), num) }; @@ -545,6 +559,7 @@ pub unsafe trait Killable { } } +// SAFETY: // Safe because we fulfill our contract of returning a genuine pthread handle. 
unsafe impl Killable for JoinHandle { fn pthread_handle(&self) -> pthread_t { diff --git a/base/src/sys/linux/signalfd.rs b/base/src/sys/linux/signalfd.rs index 11dd30a24b..6672ce8f94 100644 --- a/base/src/sys/linux/signalfd.rs +++ b/base/src/sys/linux/signalfd.rs @@ -67,6 +67,7 @@ impl SignalFd { pub fn new(signal: c_int) -> Result { let sigset = signal::create_sigset(&[signal]).map_err(Error::CreateSigset)?; + // SAFETY: // This is safe as we check the return value and know that fd is valid. let fd = unsafe { signalfd(-1, &sigset, SFD_CLOEXEC | SFD_NONBLOCK) }; if fd < 0 { @@ -76,6 +77,7 @@ impl SignalFd { // Mask out the normal handler for the signal. signal::block_signal(signal).map_err(Error::CreateBlockSignal)?; + // SAFETY: // This is safe because we checked fd for success and know the // kernel gave us an fd that we own. unsafe { @@ -88,10 +90,12 @@ impl SignalFd { /// Read a siginfo struct from the signalfd, if available. pub fn read(&self) -> Result> { + // SAFETY: // signalfd_siginfo doesn't have a default, so just zero it. let mut siginfo: signalfd_siginfo = unsafe { mem::zeroed() }; let siginfo_size = mem::size_of::(); + // SAFETY: // This read is safe since we've got the space allocated for a // single signalfd_siginfo, and that's exactly how much we're // reading. Handling of EINTR is not required since SFD_NONBLOCK @@ -166,6 +170,7 @@ mod tests { let sigid = SIGRTMIN() + 1; let sigrt_fd = SignalFd::new(sigid).unwrap(); + // SAFETY: Safe because sigid is valid and return value is checked. let ret = unsafe { raise(sigid) }; assert_eq!(ret, 0); @@ -178,6 +183,7 @@ mod tests { let sigid = SIGRTMIN() + 2; let sigrt_fd = SignalFd::new(sigid).unwrap(); + // SAFETY: Safe because sigset and sigid are valid and return value is checked. unsafe { let mut sigset: sigset_t = mem::zeroed(); pthread_sigmask(0, null(), &mut sigset as *mut sigset_t); @@ -187,6 +193,7 @@ mod tests { mem::drop(sigrt_fd); // The signal should no longer be masked. 
+ // SAFETY: Safe because sigset and sigid are valid and return value is checked. unsafe { let mut sigset: sigset_t = mem::zeroed(); pthread_sigmask(0, null(), &mut sigset as *mut sigset_t); diff --git a/base/src/sys/linux/terminal.rs b/base/src/sys/linux/terminal.rs index 91b47a7209..0065649fd3 100644 --- a/base/src/sys/linux/terminal.rs +++ b/base/src/sys/linux/terminal.rs @@ -24,20 +24,26 @@ use crate::unix::add_fd_flags; use crate::unix::clear_fd_flags; fn modify_mode(fd: RawFd, f: F) -> Result<()> { + // SAFETY: // Safe because we check the return value of isatty. if unsafe { isatty(fd) } != 1 { return Ok(()); } + // SAFETY: // The following pair are safe because termios gets totally overwritten by tcgetattr and we // check the return result. let mut termios: termios = unsafe { zeroed() }; + // SAFETY: + // The following pair are safe because termios gets totally overwritten by tcgetattr and we + // check the return result. let ret = unsafe { tcgetattr(fd, &mut termios as *mut _) }; if ret < 0 { return errno_result(); } let mut new_termios = termios; f(&mut new_termios); + // SAFETY: // Safe because the syscall will only read the extent of termios and we check the return result. let ret = unsafe { tcsetattr(fd, TCSANOW, &new_termios as *const _) }; if ret < 0 { @@ -47,6 +53,8 @@ fn modify_mode(fd: RawFd, f: F) -> Result<()> { Ok(()) } +/// # Safety +/// /// Safe only when the FD given is valid and reading the fd will have no Rust safety implications. unsafe fn read_raw(fd: RawFd, out: &mut [u8]) -> Result { let ret = read(fd, out.as_mut_ptr() as *mut _, out.len()); @@ -63,6 +71,7 @@ unsafe fn read_raw(fd: RawFd, out: &mut [u8]) -> Result { /// around stdin that the stdlib usually uses. If other code is using stdin, it is undefined who /// will get the underlying bytes. pub fn read_raw_stdin(out: &mut [u8]) -> Result { + // SAFETY: // Safe because reading from stdin shouldn't have any safety implications.
unsafe { read_raw(STDIN_FILENO, out) } } @@ -99,6 +108,7 @@ pub unsafe trait Terminal { } } +// SAFETY: // Safe because we return a genuine terminal fd that never changes and shares our lifetime. unsafe impl Terminal for Stdin { fn tty_fd(&self) -> RawFd { diff --git a/base/src/sys/linux/timer.rs b/base/src/sys/linux/timer.rs index 83b1d48921..afd6cd803d 100644 --- a/base/src/sys/linux/timer.rs +++ b/base/src/sys/linux/timer.rs @@ -37,14 +37,16 @@ impl Timer { /// Creates a new timerfd. The timer is initally disarmed and must be armed by calling /// `reset`. pub fn new() -> Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC) }; if ret < 0 { return errno_result(); } - // Safe because we uniquely own the file descriptor. Ok(Timer { + // SAFETY: + // Safe because we uniquely own the file descriptor. handle: unsafe { SafeDescriptor::from_raw_descriptor(ret) }, interval: None, }) @@ -61,6 +63,7 @@ impl Timer { it_value: duration_to_timespec(dur.unwrap_or_default()), }; + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { timerfd_settime(self.as_raw_descriptor(), 0, &spec, ptr::null_mut()) }; if ret < 0 { @@ -87,6 +90,7 @@ impl TimerTrait for Timer { revents: 0, }; + // SAFETY: // Safe because this only modifies |pfd| and we check the return value let ret = handle_eintr_errno!(unsafe { libc::ppoll( @@ -113,6 +117,7 @@ impl TimerTrait for Timer { fn mark_waited(&mut self) -> Result { let mut count = 0u64; + // SAFETY: // The timerfd is in non-blocking mode, so this should return immediately. let ret = unsafe { libc::read( @@ -134,9 +139,11 @@ impl TimerTrait for Timer { } fn resolution(&self) -> Result { + // SAFETY: // Safe because we are zero-initializing a struct with only primitive member fields.
let mut res: libc::timespec = unsafe { mem::zeroed() }; + // SAFETY: // Safe because it only modifies a local struct and we check the return value. let ret = unsafe { clock_getres(CLOCK_MONOTONIC, &mut res) }; diff --git a/base/src/sys/linux/vsock.rs b/base/src/sys/linux/vsock.rs index 2cf9b31830..6a6b5d11c8 100644 --- a/base/src/sys/linux/vsock.rs +++ b/base/src/sys/linux/vsock.rs @@ -214,6 +214,7 @@ pub struct VsockSocket { impl VsockSocket { pub fn new() -> io::Result { + // SAFETY: trivially safe let fd = unsafe { libc::socket(libc::AF_VSOCK, libc::SOCK_STREAM | libc::SOCK_CLOEXEC, 0) }; if fd < 0 { Err(io::Error::last_os_error()) @@ -237,6 +238,7 @@ impl VsockSocket { ..Default::default() }; + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::bind( @@ -265,6 +267,7 @@ impl VsockSocket { ..Default::default() }; + // SAFETY: // Safe because this just connects a vsock socket, and the return value is checked. let ret = unsafe { libc::connect( @@ -282,6 +285,7 @@ impl VsockSocket { } pub fn listen(self) -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::listen(self.fd, 1) }; if ret < 0 { @@ -295,8 +299,9 @@ impl VsockSocket { pub fn local_port(&self) -> io::Result { let mut svm: sockaddr_vm = Default::default(); - // Safe because we give a valid pointer for addrlen and check the length. let mut addrlen = size_of::() as socklen_t; + // SAFETY: + // Safe because we give a valid pointer for addrlen and check the length. let ret = unsafe { // Get the socket address that was actually bound. libc::getsockname( @@ -317,6 +322,7 @@ impl VsockSocket { } pub fn try_clone(&self) -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. 
let dup_fd = unsafe { libc::fcntl(self.fd, libc::F_DUPFD_CLOEXEC, 0) }; if dup_fd < 0 { @@ -327,6 +333,7 @@ impl VsockSocket { } pub fn set_nonblocking(&mut self, nonblocking: bool) -> io::Result<()> { + // SAFETY: // Safe because the fd is valid and owned by this stream. unsafe { set_nonblocking(self.fd, nonblocking) } } @@ -348,6 +355,7 @@ impl AsRawFd for VsockSocket { impl Drop for VsockSocket { fn drop(&mut self) { + // SAFETY: // Safe because this doesn't modify any memory and we are the only // owner of the file descriptor. unsafe { libc::close(self.fd) }; @@ -382,6 +390,7 @@ impl VsockStream { impl io::Read for VsockStream { fn read(&mut self, buf: &mut [u8]) -> io::Result { + // SAFETY: // Safe because this will only modify the contents of |buf| and we check the return value. let ret = unsafe { libc::read( @@ -400,6 +409,7 @@ impl io::Read for VsockStream { impl io::Write for VsockStream { fn write(&mut self, buf: &[u8]) -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::write( @@ -459,8 +469,9 @@ impl VsockListener { pub fn accept(&self) -> io::Result<(VsockStream, SocketAddr)> { let mut svm: sockaddr_vm = Default::default(); - // Safe because this will only modify |svm| and we check the return value. let mut socklen: socklen_t = size_of::() as socklen_t; + // SAFETY: + // Safe because this will only modify |svm| and we check the return value. let fd = unsafe { libc::accept4( self.sock.as_raw_fd(), diff --git a/base/src/sys/unix/descriptor.rs b/base/src/sys/unix/descriptor.rs index fac34252f4..67c0fac29a 100644 --- a/base/src/sys/unix/descriptor.rs +++ b/base/src/sys/unix/descriptor.rs @@ -43,6 +43,7 @@ pub fn clone_descriptor(descriptor: &dyn AsRawDescriptor) -> Result Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. 
let ret = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) }; if ret < 0 { @@ -60,6 +61,7 @@ pub fn clear_descriptor_cloexec(fd_owner: &A) -> Result<()> /// Clears CLOEXEC flag on fd fn clear_fd_cloexec(fd_owner: &A) -> Result<()> { let fd = fd_owner.as_raw_fd(); + // SAFETY: // Safe because fd is read only. let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) }; if flags == -1 { @@ -67,6 +69,7 @@ fn clear_fd_cloexec(fd_owner: &A) -> Result<()> { } let masked_flags = flags & !libc::FD_CLOEXEC; + // SAFETY: // Safe because this has no side effect(s) on the current process. if masked_flags != flags && unsafe { libc::fcntl(fd, libc::F_SETFD, masked_flags) } == -1 { errno_result() @@ -77,6 +80,8 @@ fn clear_fd_cloexec(fd_owner: &A) -> Result<()> { impl Drop for SafeDescriptor { fn drop(&mut self) { + // SAFETY: + // Safe because descriptor is valid. let _ = unsafe { libc::close(self.descriptor) }; } } @@ -101,6 +106,7 @@ impl SafeDescriptor { /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will /// share the same underlying count within the kernel. pub fn try_clone(&self) -> Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let descriptor = unsafe { libc::fcntl(self.descriptor, libc::F_DUPFD_CLOEXEC, 0) }; if descriptor < 0 { @@ -113,6 +119,7 @@ impl SafeDescriptor { impl From for File { fn from(s: SafeDescriptor) -> File { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { File::from_raw_fd(s.into_raw_descriptor()) } } @@ -120,6 +127,7 @@ impl From for File { impl From for TcpListener { fn from(s: SafeDescriptor) -> Self { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { Self::from_raw_fd(s.into_raw_descriptor()) } } @@ -127,6 +135,7 @@ impl From for TcpListener { impl From for TcpStream { fn from(s: SafeDescriptor) -> Self { + // SAFETY: // Safe because we own the SafeDescriptor at this point. 
unsafe { Self::from_raw_fd(s.into_raw_descriptor()) } } @@ -134,6 +143,7 @@ impl From for TcpStream { impl From for UnixStream { fn from(s: SafeDescriptor) -> Self { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { Self::from_raw_fd(s.into_raw_descriptor()) } } diff --git a/base/src/sys/unix/fcntl.rs b/base/src/sys/unix/fcntl.rs index eb8cdfd6de..88235daa21 100644 --- a/base/src/sys/unix/fcntl.rs +++ b/base/src/sys/unix/fcntl.rs @@ -16,17 +16,24 @@ use crate::syscall; /// /// Returns an error if the OS indicates the flags can't be retrieved. fn get_fd_flags(fd: RawFd) -> Result { - // Safe because no third parameter is expected and we check the return result. - syscall!(unsafe { fcntl(fd, F_GETFL) }) + syscall!( + // SAFETY: + // Safe because no third parameter is expected and we check the return result. + unsafe { fcntl(fd, F_GETFL) } + ) } /// Sets the file flags set for the given `RawFD`. /// /// Returns an error if the OS indicates the flags can't be retrieved. fn set_fd_flags(fd: RawFd, flags: c_int) -> Result<()> { - // Safe because we supply the third parameter and we check the return result. - // fcntlt is trusted not to modify the memory of the calling process. - syscall!(unsafe { fcntl(fd, F_SETFL, flags) }).map(|_| ()) + syscall!( + // SAFETY: + // Safe because we supply the third parameter and we check the return result. + // fcntlt is trusted not to modify the memory of the calling process. + unsafe { fcntl(fd, F_SETFL, flags) } + ) + .map(|_| ()) } /// Performs a logical OR of the given flags with the FD's flags, setting the given bits for the diff --git a/base/src/sys/unix/file_traits.rs b/base/src/sys/unix/file_traits.rs index a173695fa0..93b1989da4 100644 --- a/base/src/sys/unix/file_traits.rs +++ b/base/src/sys/unix/file_traits.rs @@ -31,6 +31,7 @@ macro_rules! 
volatile_impl { ($ty:ty) => { impl FileReadWriteVolatile for $ty { fn read_volatile(&mut self, slice: $crate::VolatileSlice) -> std::io::Result { + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. let ret = unsafe { @@ -58,6 +59,7 @@ macro_rules! volatile_impl { return Ok(0); } + // SAFETY: // Safe because only bytes inside the buffers are accessed and the kernel is // expected to handle arbitrary memory for I/O. let ret = unsafe { @@ -75,6 +77,7 @@ macro_rules! volatile_impl { } fn write_volatile(&mut self, slice: $crate::VolatileSlice) -> std::io::Result { + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. let ret = unsafe { @@ -102,6 +105,7 @@ macro_rules! volatile_impl { return Ok(0); } + // SAFETY: // Safe because only bytes inside the buffers are accessed and the kernel is // expected to handle arbitrary memory for I/O. let ret = unsafe { @@ -130,6 +134,7 @@ macro_rules! volatile_at_impl { slice: $crate::VolatileSlice, offset: u64, ) -> std::io::Result { + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. let ret = unsafe { @@ -160,6 +165,7 @@ macro_rules! volatile_at_impl { return Ok(0); } + // SAFETY: // Safe because only bytes inside the buffers are accessed and the kernel is // expected to handle arbitrary memory for I/O. let ret = unsafe { @@ -182,6 +188,7 @@ macro_rules! volatile_at_impl { slice: $crate::VolatileSlice, offset: u64, ) -> std::io::Result { + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. let ret = unsafe { @@ -212,6 +219,7 @@ macro_rules! 
volatile_at_impl { return Ok(0); } + // SAFETY: // Safe because only bytes inside the buffers are accessed and the kernel is // expected to handle arbitrary memory for I/O. let ret = unsafe { diff --git a/base/src/sys/unix/handle_eintr.rs b/base/src/sys/unix/handle_eintr.rs index ad55d83a5b..f6168eff00 100644 --- a/base/src/sys/unix/handle_eintr.rs +++ b/base/src/sys/unix/handle_eintr.rs @@ -189,6 +189,7 @@ mod tests { libc::__error() } + // SAFETY: trivially safe unsafe { *errno_location() = e; } diff --git a/base/src/sys/unix/net.rs b/base/src/sys/unix/net.rs index ebb5d68740..1afa366493 100644 --- a/base/src/sys/unix/net.rs +++ b/base/src/sys/unix/net.rs @@ -81,9 +81,11 @@ pub(in crate::sys) fn socket( sock_type: c_int, protocol: c_int, ) -> io::Result { + // SAFETY: // Safe socket initialization since we handle the returned error. match unsafe { libc::socket(domain, sock_type, protocol) } { -1 => Err(io::Error::last_os_error()), + // SAFETY: // Safe because we own the file descriptor. fd => Ok(unsafe { SafeDescriptor::from_raw_descriptor(fd) }), } @@ -95,16 +97,20 @@ pub(in crate::sys) fn socketpair( protocol: c_int, ) -> io::Result<(SafeDescriptor, SafeDescriptor)> { let mut fds = [0, 0]; + // SAFETY: // Safe because we give enough space to store all the fds and we check the return value. match unsafe { libc::socketpair(domain, sock_type, protocol, fds.as_mut_ptr()) } { -1 => Err(io::Error::last_os_error()), - // Safe because we own the file descriptors. - _ => Ok(unsafe { - ( - SafeDescriptor::from_raw_descriptor(fds[0]), - SafeDescriptor::from_raw_descriptor(fds[1]), - ) - }), + _ => Ok( + // SAFETY: + // Safe because we own the file descriptors. 
+ unsafe { + ( + SafeDescriptor::from_raw_descriptor(fds[0]), + SafeDescriptor::from_raw_descriptor(fds[1]), + ) + }, + ), } } @@ -130,6 +136,7 @@ impl TcpSocket { let ret = match sockaddr { SocketAddr::V4(a) => { let sin = sockaddrv4_to_lib_c(&a); + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. unsafe { libc::bind( @@ -141,6 +148,7 @@ impl TcpSocket { } SocketAddr::V6(a) => { let sin6 = sockaddrv6_to_lib_c(&a); + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. unsafe { libc::bind( @@ -169,6 +177,7 @@ impl TcpSocket { let ret = match sockaddr { SocketAddr::V4(a) => { let sin = sockaddrv4_to_lib_c(&a); + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. unsafe { libc::connect( @@ -180,6 +189,7 @@ impl TcpSocket { } SocketAddr::V6(a) => { let sin6 = sockaddrv6_to_lib_c(&a); + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. unsafe { libc::connect( @@ -200,6 +210,7 @@ impl TcpSocket { } pub fn listen(self) -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::listen(self.as_raw_descriptor(), 1) }; if ret < 0 { @@ -216,8 +227,9 @@ impl TcpSocket { InetVersion::V4 => { let mut sin = sockaddrv4_to_lib_c(&SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)); - // Safe because we give a valid pointer for addrlen and check the length. let mut addrlen = size_of::() as socklen_t; + // SAFETY: + // Safe because we give a valid pointer for addrlen and check the length. let ret = unsafe { // Get the socket address that was actually bound. libc::getsockname( @@ -244,8 +256,9 @@ impl TcpSocket { 0, )); - // Safe because we give a valid pointer for addrlen and check the length. let mut addrlen = size_of::() as socklen_t; + // SAFETY: + // Safe because we give a valid pointer for addrlen and check the length. 
let ret = unsafe { // Get the socket address that was actually bound. libc::getsockname( @@ -279,6 +292,7 @@ pub(in crate::sys) fn sun_path_offset() -> usize { // Prefer 0 to null() so that we do not need to subtract from the `sub_path` pointer. #[allow(clippy::zero_ptr)] let addr = 0 as *const libc::sockaddr_un; + // SAFETY: // Safe because we only use the dereference to create a pointer to the desired field in // calculating the offset. unsafe { &(*addr).sun_path as *const _ as usize } @@ -302,6 +316,7 @@ impl UnixSeqpacket { pub fn connect>(path: P) -> io::Result { let descriptor = socket(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0)?; let (addr, len) = sockaddr_un(path.as_ref())?; + // SAFETY: // Safe connect since we handle the error and use the right length generated from // `sockaddr_un`. unsafe { @@ -325,6 +340,8 @@ impl UnixSeqpacket { /// Gets the number of bytes that can be read from this socket without blocking. pub fn get_readable_bytes(&self) -> io::Result { let mut byte_count = 0i32; + // SAFETY: + // Safe because self has valid raw descriptor and return value are checked. let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONREAD, &mut byte_count) }; if ret < 0 { Err(io::Error::last_os_error()) @@ -345,6 +362,7 @@ impl UnixSeqpacket { #[cfg(debug_assertions)] let buf = &mut 0 as *mut _ as *mut _; + // SAFETY: // This form of recvfrom doesn't modify any data because all null pointers are used. We only // use the return value and check for errors on an FD owned by this structure. let ret = unsafe { @@ -375,6 +393,7 @@ impl UnixSeqpacket { /// # Errors /// Returns error when `libc::write` failed. pub fn send(&self, buf: &[u8]) -> io::Result { + // SAFETY: // Safe since we make sure the input `count` == `buf.len()` and handle the returned error. unsafe { let ret = libc::write( @@ -401,6 +420,7 @@ impl UnixSeqpacket { /// # Errors /// Returns error when `libc::read` failed. 
pub fn recv(&self, buf: &mut [u8]) -> io::Result { + // SAFETY: // Safe since we make sure the input `count` == `buf.len()` and handle the returned error. unsafe { let ret = libc::read( @@ -466,6 +486,7 @@ impl UnixSeqpacket { tv_usec: 0, }, }; + // SAFETY: // Safe because we own the fd, and the length of the pointer's data is the same as the // passed in length parameter. The level argument is valid, the kind is assumed to be valid, // and the return value is checked. @@ -498,6 +519,7 @@ impl UnixSeqpacket { /// Sets the blocking mode for this socket. pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { let mut nonblocking = nonblocking as libc::c_int; + // SAFETY: // Safe because the return value is checked, and this ioctl call sets the nonblocking mode // and does not continue holding the file descriptor after the call. let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONBIO, &mut nonblocking) }; @@ -575,6 +597,7 @@ impl UnixSeqpacketListener { .expect("fd should be an integer"); let mut result: c_int = 0; let mut result_len = size_of::() as libc::socklen_t; + // SAFETY: Safe because fd and other args are valid and the return value is checked. let ret = unsafe { libc::getsockopt( fd, @@ -593,6 +616,7 @@ impl UnixSeqpacketListener { "specified descriptor is not a listening socket", )); } + // SAFETY: // Safe because we validated the socket file descriptor. let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; return Ok(UnixSeqpacketListener { @@ -604,6 +628,7 @@ impl UnixSeqpacketListener { let descriptor = socket(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0)?; let (addr, len) = sockaddr_un(path.as_ref())?; + // SAFETY: // Safe connect since we handle the error and use the right length generated from // `sockaddr_un`. 
unsafe { @@ -638,6 +663,7 @@ impl UnixSeqpacketListener { let elapsed = Instant::now().saturating_duration_since(start); let remaining = timeout.checked_sub(elapsed).unwrap_or(Duration::ZERO); let cur_timeout_ms = i32::try_from(remaining.as_millis()).unwrap_or(i32::MAX); + // SAFETY: // Safe because we give a valid pointer to a list (of 1) FD and we check // the return value. match unsafe { libc::poll(&mut fds, 1, cur_timeout_ms) }.cmp(&0) { @@ -665,6 +691,7 @@ impl UnixSeqpacketListener { - &addr.sun_family as *const _ as usize) as libc::socklen_t; let mut len = mem::size_of::() as libc::socklen_t; + // SAFETY: // Safe because the length given matches the length of the data of the given pointer, and we // check the return value. let ret = unsafe { @@ -699,6 +726,7 @@ impl UnixSeqpacketListener { /// Sets the blocking mode for this socket. pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { let mut nonblocking = nonblocking as libc::c_int; + // SAFETY: // Safe because the return value is checked, and this ioctl call sets the nonblocking mode // and does not continue holding the file descriptor after the call. let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONBIO, &mut nonblocking) }; diff --git a/base/src/sys/unix/sock_ctrl_msg.rs b/base/src/sys/unix/sock_ctrl_msg.rs index 6bd66442c0..ce2630b8cc 100644 --- a/base/src/sys/unix/sock_ctrl_msg.rs +++ b/base/src/sys/unix/sock_ctrl_msg.rs @@ -100,6 +100,7 @@ impl CmsgBuffer { } else { CmsgBuffer::Heap( vec![ + // SAFETY: // Safe because cmsghdr only contains primitive types for // which zero initialization is valid. unsafe { MaybeUninit::::zeroed().assume_init() }; @@ -125,6 +126,7 @@ fn raw_sendmsg(fd: RawFd, iovec: &[iovec], out_fds: &[RawFd]) -> io::Result io::Result io::Result io::Result()); let mut cmsg_buffer = CmsgBuffer::with_capacity(cmsg_capacity); + // SAFETY: // msghdr on musl has private __pad1 and __pad2 fields that cannot be initialized. 
// Safe because msghdr only contains primitive types for which zero // initialization is valid. @@ -190,6 +198,7 @@ fn raw_recvmsg( msg.msg_controllen = cmsg_capacity.try_into().unwrap(); } + // SAFETY: // Safe because the msghdr was properly constructed from valid (or null) pointers of the // indicated length and we check the return value. let total_read = unsafe { recvmsg(fd, &mut msg, 0) }; @@ -205,6 +214,7 @@ fn raw_recvmsg( let mut cmsg_ptr = msg.msg_control as *mut cmsghdr; let mut in_fds: Vec = Vec::with_capacity(max_fds); while !cmsg_ptr.is_null() { + // SAFETY: // Safe because we checked that cmsg_ptr was non-null, and the loop is constructed such that // that only happens when there is at least sizeof(cmsghdr) space after the pointer to read. let cmsg = unsafe { (cmsg_ptr as *mut cmsghdr).read_unaligned() }; @@ -378,6 +388,7 @@ pub unsafe trait AsIobuf: Sized { fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec]; } +// SAFETY: // Safe because there are no other mutable references to the memory described by `IoSlice` and it is // guaranteed to be ABI-compatible with `iovec`. unsafe impl<'a> AsIobuf for IoSlice<'a> { @@ -389,16 +400,19 @@ unsafe impl<'a> AsIobuf for IoSlice<'a> { } fn as_iobuf_slice(bufs: &[Self]) -> &[iovec] { + // SAFETY: // Safe because `IoSlice` is guaranteed to be ABI-compatible with `iovec`. unsafe { slice::from_raw_parts(bufs.as_ptr() as *const iovec, bufs.len()) } } fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec] { + // SAFETY: // Safe because `IoSlice` is guaranteed to be ABI-compatible with `iovec`. unsafe { slice::from_raw_parts_mut(bufs.as_mut_ptr() as *mut iovec, bufs.len()) } } } +// SAFETY: // Safe because there are no other references to the memory described by `IoSliceMut` and it is // guaranteed to be ABI-compatible with `iovec`. 
unsafe impl<'a> AsIobuf for IoSliceMut<'a> { @@ -410,16 +424,19 @@ unsafe impl<'a> AsIobuf for IoSliceMut<'a> { } fn as_iobuf_slice(bufs: &[Self]) -> &[iovec] { + // SAFETY: // Safe because `IoSliceMut` is guaranteed to be ABI-compatible with `iovec`. unsafe { slice::from_raw_parts(bufs.as_ptr() as *const iovec, bufs.len()) } } fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec] { + // SAFETY: // Safe because `IoSliceMut` is guaranteed to be ABI-compatible with `iovec`. unsafe { slice::from_raw_parts_mut(bufs.as_mut_ptr() as *mut iovec, bufs.len()) } } } +// SAFETY: // Safe because volatile slices are only ever accessed with other volatile interfaces and the // pointer and size are guaranteed to be accurate. unsafe impl<'a> AsIobuf for VolatileSlice<'a> { @@ -455,6 +472,7 @@ mod tests { ($len:literal) => { assert_eq!( CMSG_SPACE(size_of::<[RawFd; $len]>()) as libc::c_uint, + // SAFETY: trivially safe unsafe { libc::CMSG_SPACE(size_of::<[RawFd; $len]>() as libc::c_uint) } ); }; @@ -530,6 +548,7 @@ mod tests { assert_ne!(file.as_raw_fd(), s2.as_raw_descriptor()); assert_ne!(file.as_raw_fd(), evt.as_raw_descriptor()); + // SAFETY: trivially safe file.write_all(unsafe { from_raw_parts(&1203u64 as *const u64 as *const u8, 8) }) .expect("failed to write to sent fd"); @@ -564,6 +583,7 @@ mod tests { let mut file = File::from(files.swap_remove(0)); + // SAFETY: trivially safe file.write_all(unsafe { from_raw_parts(&1203u64 as *const u64 as *const u8, 8) }) .expect("failed to write to sent fd"); diff --git a/base/src/sys/unix/stream_channel.rs b/base/src/sys/unix/stream_channel.rs index ec78469ecb..9ac7a55cef 100644 --- a/base/src/sys/unix/stream_channel.rs +++ b/base/src/sys/unix/stream_channel.rs @@ -91,6 +91,7 @@ impl StreamChannel { // (see sys::decode_error_kind) on Windows, so we preserve this behavior on POSIX even // though one could argue ErrorKind::UnexpectedEof is a closer match to the true error. 
SocketType::Message(sock) => { + // SAFETY: // Safe because buf is valid, we pass buf's size to recv to bound the return // length, and we check the return code. let retval = unsafe { diff --git a/base/src/sys/unix/system_info.rs b/base/src/sys/unix/system_info.rs index c0da3570b9..e4aa014767 100644 --- a/base/src/sys/unix/system_info.rs +++ b/base/src/sys/unix/system_info.rs @@ -12,6 +12,7 @@ use crate::Result; /// Safe wrapper for `sysconf(_SC_IOV_MAX)`. #[inline(always)] pub fn iov_max() -> usize { + // SAFETY: // Trivially safe unsafe { sysconf(_SC_IOV_MAX) as usize } } @@ -19,6 +20,7 @@ pub fn iov_max() -> usize { /// Safe wrapper for `sysconf(_SC_PAGESIZE)`. #[inline(always)] pub fn pagesize() -> usize { + // SAFETY: // Trivially safe unsafe { sysconf(_SC_PAGESIZE) as usize } } @@ -26,6 +28,7 @@ pub fn pagesize() -> usize { /// Returns the number of online logical cores on the system. #[inline(always)] pub fn number_of_logical_cores() -> Result { + // SAFETY: // Safe because we pass a flag for this call and the host supports this system call Ok(unsafe { sysconf(_SC_NPROCESSORS_CONF) } as usize) } diff --git a/base/src/sys/windows/console.rs b/base/src/sys/windows/console.rs index b98f585e0b..4d8e6d351b 100644 --- a/base/src/sys/windows/console.rs +++ b/base/src/sys/windows/console.rs @@ -25,6 +25,7 @@ impl Console { impl Read for Console { fn read(&mut self, out: &mut [u8]) -> Result { let mut num_of_bytes_read: u32 = 0; + // SAFETY: // Safe because `out` is guarenteed to be a valid mutable array // and `num_of_bytes_read` is a valid u32. 
let res = unsafe { diff --git a/base/src/sys/windows/descriptor.rs b/base/src/sys/windows/descriptor.rs index e9f09d7fb2..d15cb08a3b 100644 --- a/base/src/sys/windows/descriptor.rs +++ b/base/src/sys/windows/descriptor.rs @@ -46,6 +46,7 @@ impl PartialEq for SafeDescriptor { impl Drop for SafeDescriptor { fn drop(&mut self) { + // SAFETY: trivially safe unsafe { CloseHandle(self.descriptor) }; } } @@ -61,11 +62,13 @@ static mut KERNELBASE_LIBRARY: MaybeUninit = MaybeUninit::uninit(); fn compare_object_handles(first: RawHandle, second: RawHandle) -> bool { KERNELBASE_INIT.call_once(|| { + // SAFETY: trivially safe unsafe { *KERNELBASE_LIBRARY.as_mut_ptr() = libloaderapi::LoadLibraryW(win32_wide_string("Kernelbase").as_ptr()); }; }); + // SAFETY: the return value is checked. let handle = unsafe { KERNELBASE_LIBRARY.assume_init() }; if handle.is_null() { return first == second; @@ -73,11 +76,13 @@ fn compare_object_handles(first: RawHandle, second: RawHandle) -> bool { let addr = CString::new("CompareObjectHandles").unwrap(); let addr_ptr = addr.as_ptr(); + // SAFETY: the return value is checked. let symbol = unsafe { libloaderapi::GetProcAddress(handle, addr_ptr) }; if symbol.is_null() { return first == second; } + // SAFETY: trivially safe let func = unsafe { std::mem::transmute::< *mut winapi::shared::minwindef::__some_function, @@ -102,6 +107,7 @@ impl SafeDescriptor { /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will /// share the same underlying count within the kernel. pub fn try_clone(&self) -> Result { + // SAFETY: // Safe because `duplicate_handle` will return a valid handle, or at the very least error // out. Ok(unsafe { @@ -110,15 +116,19 @@ impl SafeDescriptor { } } +// SAFETY: // On Windows, RawHandles are represented by raw pointers but are not used as such in // rust code, and are therefore safe to send between threads. 
unsafe impl Send for SafeDescriptor {} +// SAFETY: See comments for impl Send unsafe impl Sync for SafeDescriptor {} +// SAFETY: // On Windows, RawHandles are represented by raw pointers but are opaque to the // userspace and cannot be derefenced by rust code, and are therefore safe to // send between threads. unsafe impl Send for Descriptor {} +// SAFETY: See comments for impl Send unsafe impl Sync for Descriptor {} macro_rules! AsRawDescriptor { @@ -134,6 +144,7 @@ macro_rules! AsRawDescriptor { macro_rules! FromRawDescriptor { ($name:ident) => { impl FromRawDescriptor for $name { + // SAFETY: It is caller's responsibility to ensure that the descriptor is valid. unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { return $name::from_raw_handle(descriptor); } @@ -171,6 +182,7 @@ fn clone_equality() { use crate::Event; let evt = Event::new().unwrap(); + // SAFETY: Given evt is created above and is valid. let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(evt.into_raw_descriptor()) }; assert_eq!(descriptor, descriptor); @@ -181,6 +193,7 @@ fn clone_equality() { ); let evt2 = Event::new().unwrap(); + // SAFETY: Given evt2 is created above and is valid. let another = unsafe { SafeDescriptor::from_raw_descriptor(evt2.into_raw_descriptor()) }; assert_ne!(descriptor, another); diff --git a/base/src/sys/windows/event.rs b/base/src/sys/windows/event.rs index 9566231add..49a2a474ec 100644 --- a/base/src/sys/windows/event.rs +++ b/base/src/sys/windows/event.rs @@ -73,6 +73,7 @@ impl EventExt for Event { impl PlatformEvent { pub fn new_with_manual_reset(manual_reset: bool) -> Result { + // SAFETY: Safe because return value is checked. let handle = unsafe { CreateEventA( SecurityAttributes::new_with_security_descriptor( @@ -89,12 +90,15 @@ impl PlatformEvent { return errno_result(); } Ok(PlatformEvent { - event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) }, + event_handle: + // SAFETY: Safe because the descriptor is valid. 
+ unsafe { SafeDescriptor::from_raw_descriptor(handle) }, }) } pub fn create_event_with_name(name: &str) -> Result { let event_str = CString::new(String::from(name)).unwrap(); + // SAFETY: Safe because return value is checked. let handle = unsafe { CreateEventA( SecurityAttributes::new_with_security_descriptor( @@ -111,7 +115,9 @@ impl PlatformEvent { return errno_result(); } Ok(PlatformEvent { - event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) }, + event_handle: + // SAFETY: Safe because the descriptor is valid. + unsafe { SafeDescriptor::from_raw_descriptor(handle) }, }) } @@ -122,17 +128,21 @@ impl PlatformEvent { pub fn open(name: &str) -> Result { let event_str = CString::new(String::from(name)).unwrap(); + // SAFETY: Safe because return value is checked. let handle = unsafe { OpenEventA(EVENT_MODIFY_STATE, FALSE, event_str.as_ptr()) }; if handle.is_null() { return errno_result(); } Ok(PlatformEvent { - event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) }, + event_handle: + // SAFETY: Safe because the descriptor is valid. + unsafe { SafeDescriptor::from_raw_descriptor(handle) }, }) } /// See `Event::signal`. pub fn signal(&self) -> Result<()> { + // SAFETY: Safe because the descriptor is valid. let event_result = unsafe { SetEvent(self.event_handle.as_raw_descriptor()) }; if event_result == 0 { return errno_result(); @@ -141,6 +151,7 @@ impl PlatformEvent { } pub fn reset(&self) -> Result<()> { + // SAFETY: Safe because the descriptor is valid. let res = unsafe { ResetEvent(self.event_handle.as_raw_descriptor()) }; if res == 0 { errno_result() @@ -156,6 +167,7 @@ impl PlatformEvent { None => INFINITE, }; + // SAFETY: // Safe because we pass an event object handle owned by this PlatformEvent. 
let wait_result = match unsafe { WaitForSingleObject(self.event_handle.as_raw_descriptor(), milliseconds) @@ -189,6 +201,7 @@ impl PlatformEvent { pub fn try_clone(&self) -> Result { let mut event_clone: HANDLE = MaybeUninit::uninit().as_mut_ptr(); + // SAFETY: Safe because return value is checked. let duplicate_result = unsafe { DuplicateHandle( GetCurrentProcess(), @@ -203,7 +216,10 @@ impl PlatformEvent { if duplicate_result == 0 { return errno_result(); } - Ok(unsafe { PlatformEvent::from_raw_descriptor(event_clone) }) + Ok( + // SAFETY: Safe because the descriptor is valid. + unsafe { PlatformEvent::from_raw_descriptor(event_clone) }, + ) } } @@ -214,6 +230,7 @@ impl AsRawDescriptor for PlatformEvent { } impl FromRawDescriptor for PlatformEvent { + // SAFETY: Safe because the descriptor is expected to be valid. unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { PlatformEvent { event_handle: SafeDescriptor::from_raw_descriptor(descriptor), @@ -245,10 +262,12 @@ impl From for PlatformEvent { } } +// Safety: // PlatformEvent is safe for send & Sync despite containing a raw handle to its // file mapping object. As long as the instance to PlatformEvent stays alive, this // pointer will be a valid handle. unsafe impl Send for PlatformEvent {} +// Safety: See comments for impl Send unsafe impl Sync for PlatformEvent {} #[cfg(test)] @@ -277,10 +296,12 @@ mod tests { evt.signal().unwrap(); // Wait for the notification. + // SAFETY: Safe because return value is checked. let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), INFINITE) }; assert_eq!(result, WAIT_OBJECT_0); // The notification should have reset since we already received it. + // SAFETY: Safe because return value is checked. let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) }; assert_eq!(result, WAIT_TIMEOUT); } @@ -291,15 +312,18 @@ mod tests { evt.signal().unwrap(); // Wait for the notification. + // SAFETY: Safe because return value is checked. 
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), INFINITE) }; assert_eq!(result, WAIT_OBJECT_0); // The notification should still be active because read wasn't called. + // SAFETY: Safe because return value is checked. let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) }; assert_eq!(result, WAIT_OBJECT_0); // Read and ensure the notification has cleared. evt.wait().expect("Failed to read event."); + // SAFETY: Safe because return value is checked. let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) }; assert_eq!(result, WAIT_TIMEOUT); } diff --git a/base/src/sys/windows/file_traits.rs b/base/src/sys/windows/file_traits.rs index bd7ab301b2..3e9cdc6894 100644 --- a/base/src/sys/windows/file_traits.rs +++ b/base/src/sys/windows/file_traits.rs @@ -15,9 +15,10 @@ use crate::WriteZeroesAt; impl FileReadWriteVolatile for File { fn read_volatile(&mut self, slice: VolatileSlice) -> Result { + let mut bytes = 0; + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. - let mut bytes = 0; let ret = unsafe { winapi::um::fileapi::ReadFile( self.as_raw_descriptor(), @@ -55,9 +56,10 @@ impl FileReadWriteVolatile for File { } fn write_volatile(&mut self, slice: VolatileSlice) -> Result { + let mut bytes = 0; + // SAFETY: // Safe because only bytes inside the slice are accessed and the kernel is expected // to handle arbitrary memory for I/O. - let mut bytes = 0; let ret = unsafe { winapi::um::fileapi::WriteFile( self.as_raw_descriptor(), @@ -101,10 +103,11 @@ impl FileReadWriteAtVolatile for File { // The unix implementation uses pread, which doesn't modify the file // pointer. Windows doesn't have an option for that, unfortunately. - // Safe because only bytes inside the slice are accessed and the kernel is expected - // to handle arbitrary memory for I/O. 
let mut bytes = 0; + // SAFETY: + // Safe because only bytes inside the slice are accessed and the kernel is expected + // to handle arbitrary memory for I/O. let ret = unsafe { let mut overlapped: winapi::um::minwinbase::OVERLAPPED = std::mem::zeroed(); overlapped.u.s_mut().Offset = offset as u32; @@ -149,10 +152,11 @@ impl FileReadWriteAtVolatile for File { // The unix implementation uses pwrite, which doesn't modify the file // pointer. Windows doesn't have an option for that, unfortunately. - // Safe because only bytes inside the slice are accessed and the kernel is expected - // to handle arbitrary memory for I/O. let mut bytes = 0; + // SAFETY: + // Safe because only bytes inside the slice are accessed and the kernel is expected + // to handle arbitrary memory for I/O. let ret = unsafe { let mut overlapped: winapi::um::minwinbase::OVERLAPPED = std::mem::zeroed(); overlapped.u.s_mut().Offset = offset as u32; diff --git a/base/src/sys/windows/file_util.rs b/base/src/sys/windows/file_util.rs index 0e0b8b387c..3e44feaa19 100644 --- a/base/src/sys/windows/file_util.rs +++ b/base/src/sys/windows/file_util.rs @@ -34,6 +34,7 @@ pub fn open_file_or_duplicate>(path: P, options: &OpenOptions) -> /// # Safety /// handle *must* be File. We accept all AsRawDescriptors for convenience. pub fn set_sparse_file(handle: &T) -> io::Result<()> { + // SAFETY: // Safe because we check the return value and handle is guaranteed to be a // valid file handle by the caller. let result = unsafe { @@ -60,7 +61,7 @@ struct FileAllocatedRangeBuffer { /// # Safety /// Within this scope it is not possible to use LARGE_INTEGER as something else. fn large_integer_as_u64(lint: &LARGE_INTEGER) -> u64 { - // # Safety + // SAFETY: // Safe because we use LARGE_INTEGER only as i64 or as u64 within this scope. 
unsafe { *lint.QuadPart() as u64 } } @@ -90,7 +91,7 @@ pub fn get_allocated_ranges(descriptor: &T) -> Result(descriptor: &T) -> Result Result<()> { + // SAFETY: // Safe because this API does not modify memory, and process_id remains in scope for // the duration of the call. match unsafe { AllowSetForegroundWindow(process_id) } { diff --git a/base/src/sys/windows/ioctl.rs b/base/src/sys/windows/ioctl.rs index 3f643dee77..8591f77bad 100644 --- a/base/src/sys/windows/ioctl.rs +++ b/base/src/sys/windows/ioctl.rs @@ -372,6 +372,7 @@ mod tests { f.sync_all().expect("Failed to sync all."); // read the compression status + // SAFETY: safe because return value is checked. let ecode = unsafe { super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed) }; @@ -396,6 +397,7 @@ mod tests { // https://github.com/rust-lang/rust/blob/master/src/libstd/sys/windows/fs.rs#L260 // For now I'm just going to leave this test as-is. // + // SAFETY: safe because return value is checked. let f = unsafe { File::from_raw_handle(CreateFileW( to_u16s(file_path).unwrap().as_ptr(), @@ -410,6 +412,7 @@ mod tests { }; let ecode = + // SAFETY: safe because return value is checked. unsafe { super::super::ioctl::ioctl_with_ref(&f, FSCTL_SET_COMPRESSION, &compressed) }; assert_eq!(ecode, 0); @@ -418,6 +421,7 @@ mod tests { // is writing anything to the compressed pointer. compressed = 0; + // SAFETY: safe because return value is checked. let ecode = unsafe { super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed) }; @@ -462,6 +466,7 @@ mod tests { // https://github.com/rust-lang/rust/blob/master/src/libstd/sys/windows/fs.rs#L260 // For now I'm just going to leave this test as-is. // + // SAFETY: safe because return value is checked. 
let f = unsafe { File::from_raw_handle(CreateFileW( to_u16s(file_path).unwrap().as_ptr(), @@ -477,6 +482,7 @@ mod tests { // now we call ioctl_with_val, which isn't particularly any more helpful than // ioctl_with_ref except for the cases where the input is only a word long + // SAFETY: safe because return value is checked. let ecode = unsafe { super::super::ioctl::ioctl_with_val(&f, FSCTL_SET_COMPRESSION, compressed.into()) }; @@ -487,6 +493,7 @@ mod tests { // is writing anything to the compressed pointer. compressed = 0; + // SAFETY: safe because return value is checked. let ecode = unsafe { super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed) }; diff --git a/base/src/sys/windows/mmap.rs b/base/src/sys/windows/mmap.rs index ed6d6b34ca..236e680ff5 100644 --- a/base/src/sys/windows/mmap.rs +++ b/base/src/sys/windows/mmap.rs @@ -41,6 +41,7 @@ impl dyn MappedRegion { pub fn msync(&self, offset: usize, size: usize) -> Result<()> { validate_includes_range(self.size(), offset, size)?; + // SAFETY: // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size // are correct, and we've validated that `offset`..`offset+size` is in the range owned by // this `MappedRegion`. @@ -68,11 +69,13 @@ pub struct MemoryMapping { pub(crate) size: usize, } +// SAFETY: // Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MemoryMapping {} +// SAFETY: See comments for impl Send unsafe impl Sync for MemoryMapping {} impl MemoryMapping { @@ -123,6 +126,9 @@ impl MemoryMapping { } } +// SAFETY: +// Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't +// be unmapped until it's Dropped. 
unsafe impl MappedRegion for MemoryMapping { fn as_ptr(&self) -> *mut u8 { self.addr as *mut u8 @@ -171,6 +177,7 @@ impl<'a> MemoryMappingBuilder<'a> { // handle for it first. That handle is then provided to Self::wrap, which // performs the actual mmap (creating a mapped view). // + // SAFETY: // Safe because self.descriptor is guaranteed to be a valid handle. let mapping_handle = unsafe { create_file_mapping( @@ -182,6 +189,7 @@ impl<'a> MemoryMappingBuilder<'a> { } .map_err(Error::StdSyscallFailed)?; + // SAFETY: // The above comment block is why the SafeDescriptor wrap is safe. Some(unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) }) } else { @@ -219,6 +227,7 @@ impl<'a> MemoryMappingBuilder<'a> { file_descriptor: Option<&'a dyn AsRawDescriptor>, ) -> Result { let file_descriptor = match file_descriptor { + // SAFETY: // Safe because `duplicate_handle` will return a handle or at least error out. Some(descriptor) => unsafe { Some(SafeDescriptor::from_raw_descriptor( @@ -280,6 +289,7 @@ mod tests { let shm = SharedMemory::new("test", 1028).unwrap(); let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap()); let s = m.get_slice(2, 3).unwrap(); + // SAFETY: trivially safe assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) }); } diff --git a/base/src/sys/windows/mmap_platform.rs b/base/src/sys/windows/mmap_platform.rs index aba1b00016..3f821c4498 100644 --- a/base/src/sys/windows/mmap_platform.rs +++ b/base/src/sys/windows/mmap_platform.rs @@ -46,6 +46,7 @@ impl MemoryMapping { /// * `size` - Size of memory region in bytes. /// * `prot` - Protection (e.g. readable/writable) of the memory region. pub fn new_protection(size: usize, prot: Protection) -> Result { + // SAFETY: // This is safe because we are creating an anonymous mapping in a place not already used by // any other area in this process. 
unsafe { MemoryMapping::try_mmap(None, size, prot.into(), None) } @@ -64,6 +65,7 @@ impl MemoryMapping { file_handle: RawDescriptor, size: usize, ) -> Result { + // SAFETY: // This is safe because we are creating an anonymous mapping in a place not already used by // any other area in this process. unsafe { @@ -89,6 +91,7 @@ impl MemoryMapping { offset: u64, prot: Protection, ) -> Result { + // SAFETY: // This is safe because we are creating an anonymous mapping in a place not already used by // any other area in this process. unsafe { @@ -198,6 +201,7 @@ impl MemoryMapping { /// Calls FlushViewOfFile on the mapped memory range, ensuring all changes that would /// be written to disk are written immediately pub fn msync(&self) -> Result<()> { + // SAFETY: // Safe because self can only be created as a successful memory mapping unsafe { if FlushViewOfFile(self.addr, self.size) == 0 { @@ -210,6 +214,7 @@ impl MemoryMapping { impl Drop for MemoryMapping { fn drop(&mut self) { + // SAFETY: // This is safe because we MapViewOfFile the area at addr ourselves, and nobody // else is holding a reference to it. unsafe { @@ -240,6 +245,7 @@ mod tests { #[test] fn map_invalid_fd() { + // SAFETY: trivially safe to create an invalid File. let descriptor = unsafe { std::fs::File::from_raw_descriptor(ptr::null_mut()) }; let res = MemoryMappingBuilder::new(1024) .from_file(&descriptor) diff --git a/base/src/sys/windows/multi_process_mutex.rs b/base/src/sys/windows/multi_process_mutex.rs index 64a5ba83c8..613a7a5022 100644 --- a/base/src/sys/windows/multi_process_mutex.rs +++ b/base/src/sys/windows/multi_process_mutex.rs @@ -29,7 +29,7 @@ pub struct MultiProcessMutex { impl MultiProcessMutex { pub fn new() -> Result { - // Trivially safe (no memory passed, error checked). + // SAFETY: Trivially safe (no memory passed, error checked). // // Note that we intentionally make this handle uninheritable by default via the mutex attrs. 
let lock_handle = unsafe { @@ -44,6 +44,7 @@ impl MultiProcessMutex { Err(Error::last()) } else { Ok(Self { + // SAFETY: // Safe because the handle is valid & we own it exclusively. lock: unsafe { SafeDescriptor::from_raw_descriptor(lock_handle) }, }) @@ -63,6 +64,7 @@ impl MultiProcessMutex { /// Tries to lock the mutex, returning a RAII guard similar to std::sync::Mutex if we obtained /// the lock within the timeout. pub fn try_lock(&self, timeout_ms: u32) -> Option { + // SAFETY: // Safe because the mutex handle is guaranteed to exist. match unsafe { WaitForSingleObject(self.lock.as_raw_descriptor(), timeout_ms) } { WAIT_OBJECT_0 => Some(MultiProcessMutexGuard { lock: &self.lock }), @@ -93,6 +95,7 @@ pub struct MultiProcessMutexGuard<'a> { impl<'a> Drop for MultiProcessMutexGuard<'a> { fn drop(&mut self) { + // SAFETY: We own the descriptor and it is expected to be valid. if unsafe { ReleaseMutex(self.lock.as_raw_descriptor()) } == 0 { panic!("Failed to unlock mutex: {:?}.", Error::last()) } diff --git a/base/src/sys/windows/named_pipes.rs b/base/src/sys/windows/named_pipes.rs index 7f65c7d1b5..eb8e1f304d 100644 --- a/base/src/sys/windows/named_pipes.rs +++ b/base/src/sys/windows/named_pipes.rs @@ -137,6 +137,7 @@ impl OverlappedWrapper { } } +// SAFETY: // Safe because all of the contained fields may be safely sent to another thread. unsafe impl Send for OverlappedWrapper {} @@ -242,6 +243,7 @@ impl From<&BlockingMode> for DWORD { } /// Sets the handle state for a named pipe in a Rust friendly way. +/// SAFETY: /// This is safe if the pipe handle is open. unsafe fn set_named_pipe_handle_state( pipe_handle: RawDescriptor, @@ -377,9 +379,10 @@ pub fn create_server_pipe( // This sets flags so there will be an error if >1 instance (server end) // of this pipe name is opened because we expect exactly one. 
+ // SAFETY: + // Safe because security attributes are valid, pipe_name is valid C string, + // and we're checking the return code let server_handle = unsafe { - // Safe because security attributes are valid, pipe_name is valid C string, - // and we're checking the return code CreateNamedPipeA( c_pipe_name.as_ptr(), /* dwOpenMode= */ @@ -405,6 +408,7 @@ pub fn create_server_pipe( if server_handle == INVALID_HANDLE_VALUE { Err(io::Error::last_os_error()) } else { + // SAFETY: Safe because server_handle is valid. unsafe { Ok(PipeConnection { handle: SafeDescriptor::from_raw_descriptor(server_handle), @@ -448,12 +452,14 @@ pub fn create_client_pipe( let mut client_mode = framing_mode.to_readmode() | DWORD::from(blocking_mode); + // SAFETY: // Safe because client_handle's open() call did not return an error. unsafe { set_named_pipe_handle_state(client_handle, &mut client_mode)?; } Ok(PipeConnection { + // SAFETY: // Safe because client_handle is valid handle: unsafe { SafeDescriptor::from_raw_descriptor(client_handle) }, framing_mode: *framing_mode, @@ -604,6 +610,7 @@ impl PipeConnection { overlapped_wrapper: &mut OverlappedWrapper, exit_event: &Event, ) -> Result<()> { + // SAFETY: // Safe because we are providing a valid buffer slice and also providing a valid // overlapped struct. match unsafe { self.read_overlapped(buf, overlapped_wrapper) } { @@ -665,6 +672,7 @@ impl PipeConnection { pub fn get_available_byte_count(&self) -> io::Result { let mut total_bytes_avail: DWORD = 0; + // SAFETY: // Safe because the underlying pipe handle is guaranteed to be open, and the output values // live at valid memory locations. fail_if_zero!(unsafe { @@ -736,6 +744,7 @@ impl PipeConnection { buf: &[T], overlapped: Option<&mut OVERLAPPED>, ) -> Result { + // SAFETY: // Safe because buf points to memory valid until the write completes and we pass a valid // length for that memory. 
unsafe { @@ -753,6 +762,7 @@ impl PipeConnection { let mut client_mode = DWORD::from(blocking_mode) | self.framing_mode.to_readmode(); self.blocking_mode = *blocking_mode; + // SAFETY: // Safe because the pipe has not been closed (it is managed by this object). unsafe { set_named_pipe_handle_state(self.handle.as_raw_descriptor(), &mut client_mode) } } @@ -827,6 +837,7 @@ impl PipeConnection { overlapped_wrapper: &mut OverlappedWrapper, should_block: bool, ) -> Result<()> { + // SAFETY: // Safe because the handle is valid and we're checking the return // code according to the documentation // @@ -917,6 +928,7 @@ impl PipeConnection { )); } let mut size_transferred = 0; + // SAFETY: // Safe as long as `overlapped_struct` isn't copied and also contains a valid event. // Also the named pipe handle must created with `FILE_FLAG_OVERLAPPED`. fail_if_zero!(unsafe { @@ -934,12 +946,15 @@ impl PipeConnection { /// Cancels I/O Operations in the current process. Since `lpOverlapped` is null, this will /// cancel all I/O requests for the file handle passed in. pub fn cancel_io(&mut self) -> Result<()> { - fail_if_zero!(unsafe { - CancelIoEx( - self.handle.as_raw_descriptor(), - /* lpOverlapped= */ std::ptr::null_mut(), - ) - }); + fail_if_zero!( + // SAFETY: descriptor is valid and the return value is checked. + unsafe { + CancelIoEx( + self.handle.as_raw_descriptor(), + /* lpOverlapped= */ std::ptr::null_mut(), + ) + } + ); Ok(()) } @@ -979,6 +994,7 @@ impl PipeConnection { /// call this if you are sure the client is reading the /// data! pub fn flush_data_blocking(&self) -> Result<()> { + // SAFETY: // Safe because the only buffers interacted with are // outside of Rust memory fail_if_zero!(unsafe { FlushFileBuffers(self.as_raw_descriptor()) }); @@ -987,6 +1003,7 @@ impl PipeConnection { /// For a server pipe, disconnect all clients, discarding any buffered data. 
pub fn disconnect_clients(&self) -> Result<()> { + // SAFETY: // Safe because we own the handle passed in and know it will remain valid for the duration // of the call. Discarded buffers are not managed by rust. fail_if_zero!(unsafe { DisconnectNamedPipe(self.as_raw_descriptor()) }); @@ -1006,11 +1023,14 @@ impl IntoRawDescriptor for PipeConnection { } } +// SAFETY: Send safety is ensured by inner fields. unsafe impl Send for PipeConnection {} +// SAFETY: Sync safety is ensured by inner fields. unsafe impl Sync for PipeConnection {} impl io::Read for PipeConnection { fn read(&mut self, buf: &mut [u8]) -> io::Result { + // SAFETY: // This is safe because PipeConnection::read is always safe for u8 unsafe { PipeConnection::read(self, buf) } } @@ -1118,16 +1138,16 @@ impl MultiPartMessagePipe { Ok(()) } - /// # Safety - /// `buf` and `overlapped_wrapper` will be in use for the duration of - /// the overlapped operation. These must not be reused and must live until - /// after `get_overlapped_result()` has been called which is done right - /// after this call. fn write_overlapped_blocking_message_internal( pipe: &mut PipeConnection, buf: &[T], overlapped_wrapper: &mut OverlappedWrapper, ) -> Result<()> { + // SAFETY: + // `buf` and `overlapped_wrapper` will be in use for the duration of + // the overlapped operation. These must not be reused and must live until + // after `get_overlapped_result()` has been called which is done right + // after this call. unsafe { pipe.write_overlapped(buf, overlapped_wrapper)?; } @@ -1229,6 +1249,7 @@ mod tests { let (p1, p2) = pair(&FramingMode::Byte, &BlockingMode::Wait, 0).unwrap(); // Test both forward and reverse direction since the underlying APIs are a bit asymmetrical + // SAFETY: trivially safe with pipe created and return value checked. 
unsafe { for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() { println!("{}", dir); @@ -1284,6 +1305,7 @@ mod tests { let (p1, p2) = pair(&FramingMode::Message, &BlockingMode::Wait, 0).unwrap(); // Test both forward and reverse direction since the underlying APIs are a bit asymmetrical + // SAFETY: trivially safe with pipe created and return value checked. unsafe { for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() { println!("{}", dir); @@ -1310,6 +1332,7 @@ mod tests { let mut recv_buffer: [u8; 1] = [0; 1]; // Test both forward and reverse direction since the underlying APIs are a bit asymmetrical + // SAFETY: trivially safe with PipeConnection created and return value checked. unsafe { for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() { println!("{}", dir); @@ -1362,6 +1385,7 @@ mod tests { ) .unwrap(); + // SAFETY: // Safe because `read_overlapped` can be called since overlapped struct is created. unsafe { let mut p1_overlapped_wrapper = @@ -1419,9 +1443,9 @@ mod tests { let res = unsafe { p1.write_overlapped(&data, &mut overlapped_wrapper) }; assert!(res.is_ok()); - // SAFETY: safe because we know the unsafe re-use of overlapped wrapper - // will error out. let res = + // SAFETY: safe because we know the unsafe re-use of overlapped wrapper + // will error out. unsafe { p2.write_overlapped(&[75, 77, 54, 82, 76, 65], &mut overlapped_wrapper) }; assert!(res.is_err()); diff --git a/base/src/sys/windows/platform_timer_utils.rs b/base/src/sys/windows/platform_timer_utils.rs index 5063358968..a73e65765c 100644 --- a/base/src/sys/windows/platform_timer_utils.rs +++ b/base/src/sys/windows/platform_timer_utils.rs @@ -34,6 +34,7 @@ static mut NT_LIBRARY: MaybeUninit = MaybeUninit::uninit(); #[inline] fn init_ntdll() -> Result { NT_INIT.call_once(|| { + // SAFETY: return value is checked. 
unsafe { *NT_LIBRARY.as_mut_ptr() = libloaderapi::LoadLibraryW(win32_wide_string("ntdll").as_ptr()); @@ -44,6 +45,7 @@ fn init_ntdll() -> Result { }; }); + // SAFETY: NT_LIBRARY initialized above. let handle = unsafe { NT_LIBRARY.assume_init() }; if handle.is_null() { Err(Error::from(io::Error::new( @@ -56,6 +58,7 @@ fn init_ntdll() -> Result { } fn get_symbol(handle: HMODULE, proc_name: &str) -> Result<*mut minwindef::__some_function> { + // SAFETY: return value is checked. let symbol = unsafe { libloaderapi::GetProcAddress(handle, win32_string(proc_name).as_ptr()) }; if symbol.is_null() { Err(Error::last()) @@ -68,6 +71,7 @@ fn get_symbol(handle: HMODULE, proc_name: &str) -> Result<*mut minwindef::__some pub fn nt_query_timer_resolution() -> Result<(Duration, Duration)> { let handle = init_ntdll()?; + // SAFETY: trivially safe let func = unsafe { std::mem::transmute::< *mut minwindef::__some_function, @@ -99,6 +103,7 @@ pub fn nt_query_timer_resolution() -> Result<(Duration, Duration)> { pub fn nt_set_timer_resolution(resolution: Duration) -> Result<()> { let handle = init_ntdll()?; + // SAFETY: trivially safe let func = unsafe { std::mem::transmute::< *mut minwindef::__some_function, @@ -150,10 +155,11 @@ pub fn set_time_period(res: Duration, begin: bool) -> Result<()> { panic!("time(Begin|End)Period does not support values above u32::MAX.",); } - // Trivially safe. Note that the casts are safe because we know res is within u32's range. let ret = if begin { + // SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range. unsafe { timeBeginPeriod(res.as_millis() as u32) } } else { + // SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range. 
unsafe { timeEndPeriod(res.as_millis() as u32) } }; if ret != TIMERR_NOERROR { diff --git a/base/src/sys/windows/priority.rs b/base/src/sys/windows/priority.rs index bd92dcc2ed..d3a2d6cb02 100644 --- a/base/src/sys/windows/priority.rs +++ b/base/src/sys/windows/priority.rs @@ -16,6 +16,7 @@ use super::errno_result; use super::Result; pub fn set_audio_thread_priority() -> Result { + // SAFETY: // Safe because we know Pro Audio is part of windows and we down task_index. let multimedia_handle = unsafe { let mut task_index: u32 = 0; @@ -28,6 +29,7 @@ pub fn set_audio_thread_priority() -> Result { if multimedia_handle.is_null() { warn!( "Failed to set audio thread to Pro Audio. Error: {}", + // SAFETY: trivially safe unsafe { GetLastError() } ); errno_result() @@ -38,6 +40,7 @@ pub fn set_audio_thread_priority() -> Result { pub fn set_thread_priority(thread_priority: i32) -> Result<()> { let res = + // SAFETY: // Safe because priority level value is valid and a valid thread handle will be passed in unsafe { SetThreadPriority(GetCurrentThread(), thread_priority) }; if res == 0 { @@ -53,13 +56,16 @@ pub struct SafeMultimediaHandle { impl Drop for SafeMultimediaHandle { fn drop(&mut self) { + // SAFETY: // Safe because we `multimedia_handle` is defined in the same thread and is created in the // function above. `multimedia_handle` needs be created from `AvSetMmThreadCharacteristicsA`. // This will also drop the `mulitmedia_handle`. if unsafe { AvRevertMmThreadCharacteristics(self.multimedia_handle) } == FALSE { - warn!("Failed to revert audio thread. Error: {}", unsafe { - GetLastError() - }); + warn!( + "Failed to revert audio thread. Error: {}", + // SAFETY: trivially safe + unsafe { GetLastError() } + ); } } } @@ -77,6 +83,7 @@ mod test { #[test] #[ignore] fn test_mm_handle_is_dropped() { + // SAFETY: // Safe because the only the only unsafe functions called are to get the thread // priority. 
unsafe { diff --git a/base/src/sys/windows/punch_hole.rs b/base/src/sys/windows/punch_hole.rs index c005ca419f..094d2c4693 100644 --- a/base/src/sys/windows/punch_hole.rs +++ b/base/src/sys/windows/punch_hole.rs @@ -37,6 +37,7 @@ pub(crate) fn file_punch_hole(handle: &File, offset: u64, length: u64) -> io::Re BeyondFinalZero: *end_offset, }; + // SAFETY: // Safe because we check the return value and all values should be set let result = unsafe { super::ioctl::ioctl_with_ref(handle, FSCTL_SET_ZERO_DATA, &zero_data) }; diff --git a/base/src/sys/windows/sched.rs b/base/src/sys/windows/sched.rs index 71c2786248..6aa315e77b 100644 --- a/base/src/sys/windows/sched.rs +++ b/base/src/sys/windows/sched.rs @@ -37,6 +37,7 @@ pub fn set_cpu_affinity>(cpus: I) -> Result } pub fn set_cpu_affinity_mask(affinity_mask: usize) -> Result { + // SAFETY: trivially safe as return value is checked. let res: usize = unsafe { let thread_handle = GetCurrentThread(); SetThreadAffinityMask(thread_handle, affinity_mask) @@ -63,6 +64,7 @@ mod tests { fn cpu_affinity() { let mut process_affinity_mask: usize = 0; let mut system_affinity_mask: usize = 0; + // SAFETY: trivially safe as return value is checked. let res = unsafe { GetProcessAffinityMask( GetCurrentProcess(), diff --git a/base/src/sys/windows/shm.rs b/base/src/sys/windows/shm.rs index 065e9bbb64..e26ceb71d6 100644 --- a/base/src/sys/windows/shm.rs +++ b/base/src/sys/windows/shm.rs @@ -15,13 +15,15 @@ use crate::SharedMemory; impl PlatformSharedMemory for SharedMemory { fn new(_debug_name: &CStr, size: u64) -> Result { - // Safe because we do not provide a handle. let mapping_handle = + // SAFETY: + // Safe because we do not provide a handle. unsafe { create_file_mapping(None, size, PAGE_EXECUTE_READWRITE, None) } .map_err(super::Error::from)?; - // Safe because we have exclusive ownership of mapping_handle & it is valid. 
Self::from_safe_descriptor( + // SAFETY: + // Safe because we have exclusive ownership of mapping_handle & it is valid. unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) }, size, ) diff --git a/base/src/sys/windows/stream_channel.rs b/base/src/sys/windows/stream_channel.rs index 7e923bfc20..3ed6c6b627 100644 --- a/base/src/sys/windows/stream_channel.rs +++ b/base/src/sys/windows/stream_channel.rs @@ -203,11 +203,10 @@ impl StreamChannel { // could stall readers.) let _read_lock = self.read_lock.lock(); - let res = unsafe { - // Safe because no partial reads are possible, and the underlying code bounds the - // read by buf's size. - self.pipe_conn.read(buf) - }; + // SAFETY: + // Safe because no partial reads are possible, and the underlying code bounds the + // read by buf's size. + let res = unsafe { self.pipe_conn.read(buf) }; // The entire goal of this complex section is to avoid the need for shared memory between // each channel end to synchronize the notification state. It is very subtle, modify with diff --git a/base/src/sys/windows/syslog.rs b/base/src/sys/windows/syslog.rs index b05d1fd91a..349353aa88 100644 --- a/base/src/sys/windows/syslog.rs +++ b/base/src/sys/windows/syslog.rs @@ -26,8 +26,10 @@ use crate::syslog::Log; use crate::syslog::Syslog; use crate::RawDescriptor; +// SAFETY: // On windows RawDescriptor is !Sync + !Send, but also on windows we don't do anything with them unsafe impl Sync for crate::syslog::State {} +// SAFETY: See comments for impl Sync unsafe impl Send for crate::syslog::State {} pub struct PlatformSyslog {} diff --git a/base/src/sys/windows/system_info.rs b/base/src/sys/windows/system_info.rs index 65cd9ead5b..6b0a98babe 100644 --- a/base/src/sys/windows/system_info.rs +++ b/base/src/sys/windows/system_info.rs @@ -19,6 +19,7 @@ struct SystemInfo { } static SYSTEM_INFO: Lazy = Lazy::new(|| { + // SAFETY: // Safe because this is a universally available call on modern Windows systems. 
let sysinfo = unsafe { let mut sysinfo = MaybeUninit::::uninit(); @@ -51,6 +52,7 @@ pub fn allocation_granularity() -> u64 { /// Cross-platform wrapper around getting the current process id. #[inline(always)] pub fn getpid() -> Pid { + // SAFETY: // Safe because we only use the return value. unsafe { GetCurrentProcessId() } } diff --git a/base/src/sys/windows/terminal.rs b/base/src/sys/windows/terminal.rs index a64e2cb07e..00c5cf979f 100644 --- a/base/src/sys/windows/terminal.rs +++ b/base/src/sys/windows/terminal.rs @@ -35,6 +35,7 @@ pub unsafe trait Terminal { let descriptor = self.terminal_descriptor(); let mut orig_mode = 0; + // SAFETY: // Safe because we provide a valid descriptor and pointer and we check the return result. if unsafe { GetConsoleMode(descriptor, &mut orig_mode) } == 0 { return Err(Error::last()); @@ -43,6 +44,7 @@ pub unsafe trait Terminal { let new_mode = (orig_mode | ENABLE_VIRTUAL_TERMINAL_INPUT) & !(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT); + // SAFETY: // Safe because the syscall will only read the extent of mode and we check the return result. if unsafe { SetConsoleMode(descriptor, new_mode) } == 0 { return Err(Error::last()); @@ -53,6 +55,7 @@ pub unsafe trait Terminal { /// Set this terminal's mode to a previous state returned by `set_raw_mode()`. fn restore_mode(&self, mode: DWORD) -> Result<()> { + // SAFETY: // Safe because the syscall will only read the extent of mode and we check the return result. if unsafe { SetConsoleMode(self.terminal_descriptor(), mode) } == 0 { Err(Error::last()) @@ -62,6 +65,7 @@ pub unsafe trait Terminal { } } +// SAFETY: // Safe because we return a genuine terminal descriptor that never changes and shares our lifetime. 
unsafe impl Terminal for Stdin { fn terminal_descriptor(&self) -> RawDescriptor { diff --git a/base/src/sys/windows/timer.rs b/base/src/sys/windows/timer.rs index ee146b2dc3..0f899a36df 100644 --- a/base/src/sys/windows/timer.rs +++ b/base/src/sys/windows/timer.rs @@ -38,6 +38,7 @@ impl Timer { /// `reset`. Note that this timer MAY wake/trigger early due to limitations on /// SetWaitableTimer (see ). pub fn new() -> Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let handle = unsafe { CreateWaitableTimerA( @@ -59,8 +60,9 @@ impl Timer { return errno_result(); } - // Safe because we uniquely own the file descriptor. Ok(Timer { + // SAFETY: + // Safe because we uniquely own the file descriptor. handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) }, interval: None, }) @@ -100,6 +102,7 @@ impl TimerTrait for Timer { None => 0, }; + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { SetWaitableTimer( @@ -119,6 +122,7 @@ impl TimerTrait for Timer { } fn wait(&mut self) -> Result<()> { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { WaitForSingleObject(self.as_raw_descriptor(), INFINITE) }; @@ -137,6 +141,7 @@ impl TimerTrait for Timer { } fn clear(&mut self) -> Result<()> { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { CancelWaitableTimer(self.as_raw_descriptor()) }; diff --git a/base/src/sys/windows/wait.rs b/base/src/sys/windows/wait.rs index 183527ae98..e7a5119774 100644 --- a/base/src/sys/windows/wait.rs +++ b/base/src/sys/windows/wait.rs @@ -198,6 +198,8 @@ impl EventContext { // which always populates the list. return Err(Error::new(ERROR_INVALID_PARAMETER)); } + // SAFETY: raw handles array is expected to contain valid handles and the return value of + // the function is checked. 
let result = unsafe { WaitForMultipleObjects( raw_handles_list.len() as DWORD, @@ -254,14 +256,18 @@ impl EventContext { if handles_offset >= handles_len { break; } - event_index = (unsafe { - WaitForMultipleObjects( - (raw_handles_list.len() - handles_offset) as DWORD, - raw_handles_list[handles_offset..].as_ptr(), - FALSE, // return when one event is signaled - 0, /* instantaneous timeout */ - ) - } - WAIT_OBJECT_0) as usize; + event_index = ( + // SAFETY: raw handles array is expected to contain valid handles and the + // return value of the function is checked. + unsafe { + WaitForMultipleObjects( + (raw_handles_list.len() - handles_offset) as DWORD, + raw_handles_list[handles_offset..].as_ptr(), + FALSE, // return when one event is signaled + 0, /* instantaneous timeout */ + ) + } - WAIT_OBJECT_0 + ) as usize; if event_index >= (handles_len - handles_offset) { // This indicates a failure condition, as return values greater than the length diff --git a/base/src/volatile_memory.rs b/base/src/volatile_memory.rs index f7d4036a25..c509ae7546 100644 --- a/base/src/volatile_memory.rs +++ b/base/src/volatile_memory.rs @@ -122,6 +122,7 @@ impl<'a> VolatileSlice<'a> { pub fn as_iobufs<'mem, 'slice>( iovs: &'slice [VolatileSlice<'mem>], ) -> &'slice [IoBufMut<'mem>] { + // SAFETY: // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`. unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) } } @@ -131,6 +132,7 @@ impl<'a> VolatileSlice<'a> { pub fn as_iobufs_mut<'mem, 'slice>( iovs: &'slice mut [VolatileSlice<'mem>], ) -> &'slice mut [IoBufMut<'mem>] { + // SAFETY: // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`. 
unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBufMut, iovs.len()) } } @@ -149,6 +151,7 @@ impl<'a> VolatileSlice<'a> { .checked_sub(count) .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?; + // SAFETY: // Safe because the memory has the same lifetime and points to a subset of the memory of the // original slice. unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) } @@ -174,6 +177,7 @@ impl<'a> VolatileSlice<'a> { }, )?; + // SAFETY: // Safe because we have verified that the new memory is a subset of the original slice. Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) }) } @@ -196,6 +200,7 @@ impl<'a> VolatileSlice<'a> { /// # Ok(()) /// # } pub fn write_bytes(&self, value: u8) { + // SAFETY: // Safe because the memory is valid and needs only byte alignment. unsafe { write_bytes(self.as_mut_ptr(), value, self.size()); @@ -230,6 +235,7 @@ impl<'a> VolatileSlice<'a> { { let mut addr = self.as_mut_ptr() as *const u8; for v in buf.iter_mut().take(self.size() / size_of::()) { + // SAFETY: Safe because buf is valid, aligned to type `T` and is initialized. unsafe { *v = read_volatile(addr as *const T); addr = addr.add(size_of::()); @@ -253,6 +259,7 @@ impl<'a> VolatileSlice<'a> { /// # } /// ``` pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) { + // SAFETY: Safe because slice is valid and is byte aligned. unsafe { copy( self.as_mut_ptr() as *const u8, @@ -293,6 +300,7 @@ impl<'a> VolatileSlice<'a> { { let mut addr = self.as_mut_ptr(); for v in buf.iter().take(self.size() / size_of::()) { + // SAFETY: Safe because buf is valid, aligned to type `T` and is mutable. unsafe { write_volatile( addr as *mut T, @@ -318,11 +326,11 @@ impl<'a> VolatileSlice<'a> { let aligned_tail_addr = tail_addr & !MASK_4BIT; // Check 16 bytes at once. The addresses should be 16 bytes aligned for better performance. 
- // SAFETY: Each aligned_addr is within VolatileSlice - if (aligned_head_addr..aligned_tail_addr) - .step_by(16) - .any(|aligned_addr| unsafe { *(aligned_addr as *const u128) } != 0) - { + if (aligned_head_addr..aligned_tail_addr).step_by(16).any( + |aligned_addr| + // SAFETY: Each aligned_addr is within VolatileSlice + unsafe { *(aligned_addr as *const u128) } != 0, + ) { return false; } @@ -346,7 +354,7 @@ impl<'a> VolatileSlice<'a> { /// /// This checks byte by byte. /// -/// ## Safety +/// # Safety /// /// * `head_addr` <= `tail_addr` /// * Bytes between `head_addr` and `tail_addr` is valid to access. @@ -417,7 +425,10 @@ mod tests { }, )?; - Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) }) + Ok( + // SAFETY: trivially safe + unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) }, + ) } } diff --git a/base/tests/linux/main.rs b/base/tests/linux/main.rs index c30506345d..a87561407c 100644 --- a/base/tests/linux/main.rs +++ b/base/tests/linux/main.rs @@ -52,10 +52,12 @@ fn safe_descriptor_from_path_none() { #[test] #[allow(clippy::eq_op)] fn clone_equality() { + // SAFETY: Safe because return value is checked. let ret = unsafe { libc::eventfd(0, 0) }; if ret < 0 { panic!("failed to create eventfd"); } + // SAFETY: Safe because ret is valid and return value is checked. let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; assert_eq!(descriptor, descriptor); @@ -65,10 +67,13 @@ fn clone_equality() { descriptor.try_clone().expect("failed to clone eventfd") ); + // SAFETY: Safe because return value is checked. let ret = unsafe { libc::eventfd(0, 0) }; if ret < 0 { panic!("failed to create eventfd"); } + + // SAFETY: Safe because ret is valid and return value is checked. 
let another = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; assert_ne!(descriptor, another); diff --git a/base/tests/linux/net.rs b/base/tests/linux/net.rs index a26a62b309..b6eec17ad0 100644 --- a/base/tests/linux/net.rs +++ b/base/tests/linux/net.rs @@ -36,6 +36,7 @@ fn unix_seqpacket_listener_from_fd() { UnixSeqpacketListener::bind(&socket_path).expect("failed to create UnixSeqpacketListener"), ); // UnixSeqpacketListener should succeed on a valid listening descriptor. + // SAFETY: Safe because `listener` is valid and the return value is checked. let good_dup = UnixSeqpacketListener::bind(format!("/proc/self/fd/{}", unsafe { libc::dup(listener.as_raw_descriptor()) })); @@ -46,6 +47,7 @@ fn unix_seqpacket_listener_from_fd() { assert!(good_dup_path.is_err()); // UnixSeqpacketListener must fail on an existing non-listener socket. let s1 = UnixSeqpacket::connect(socket_path.as_path()).expect("UnixSeqpacket::connect failed"); + // SAFETY: Safe because `s1` is valid and the return value is checked. 
let bad_dup = UnixSeqpacketListener::bind(format!("/proc/self/fd/{}", unsafe { libc::dup(s1.as_raw_descriptor()) })); diff --git a/base/tests/linux/tube.rs b/base/tests/linux/tube.rs index f622ebbe62..1decdb14c3 100644 --- a/base/tests/linux/tube.rs +++ b/base/tests/linux/tube.rs @@ -37,9 +37,9 @@ fn test_serialize_tube_new() { let msg_descriptors = msg_serialize.into_descriptors(); // Deserialize the Tube - let msg_descriptors_safe = msg_descriptors - .into_iter() - .map(|v| unsafe { SafeDescriptor::from_raw_descriptor(v) }); + let msg_descriptors_safe = msg_descriptors.into_iter().map(|v| + // SAFETY: Safe because `v` is a valid descriptor + unsafe { SafeDescriptor::from_raw_descriptor(v) }); let tube_deserialized: Tube = deserialize_with_descriptors(|| serde_json::from_slice(&serialized), msg_descriptors_safe) .unwrap(); diff --git a/base/tests/process.rs b/base/tests/process.rs index 1368e4d9d3..30f71accdc 100644 --- a/base/tests/process.rs +++ b/base/tests/process.rs @@ -52,6 +52,7 @@ mod test { assert_eq!(thread_comm, thread_name + "\n"); + // SAFETY: child pid is expected to be valid and we wait on the child unsafe { libc::kill(child.pid, libc::SIGKILL) }; child.wait().unwrap(); } @@ -75,6 +76,7 @@ mod test { assert_eq!(thread_comm, "123456789012345\n"); + // SAFETY: child pid is expected to be valid and we wait on the child unsafe { libc::kill(child.pid, libc::SIGKILL) }; child.wait().unwrap(); } diff --git a/base/tests/tube.rs b/base/tests/tube.rs index 468992ae03..bc5caf373e 100644 --- a/base/tests/tube.rs +++ b/base/tests/tube.rs @@ -135,9 +135,9 @@ fn test_serialize_tube_pair() { let msg_descriptors = msg_serialize.into_descriptors(); // Deserialize the Tube - let msg_descriptors_safe = msg_descriptors - .into_iter() - .map(|v| unsafe { SafeDescriptor::from_raw_descriptor(v) }); + let msg_descriptors_safe = msg_descriptors.into_iter().map(|v| + // SAFETY: `v` is expected to be valid + unsafe { SafeDescriptor::from_raw_descriptor(v) }); let 
tube_deserialized: Tube = deserialize_with_descriptors(|| serde_json::from_slice(&serialized), msg_descriptors_safe) .unwrap(); diff --git a/broker_ipc/src/lib.rs b/broker_ipc/src/lib.rs index c6bac27610..4f396615a3 100644 --- a/broker_ipc/src/lib.rs +++ b/broker_ipc/src/lib.rs @@ -70,8 +70,9 @@ pub fn common_child_setup(args: CommonChildStartupArgs) -> anyhow::Result &[S] { let valid_length = self.get_valid_len(); + // SAFETY: // Safe because the length has been validated. unsafe { self.entries[0].get_slice(valid_length) } } @@ -157,6 +158,7 @@ where pub fn mut_entries_slice(&mut self) -> &mut [S] { let valid_length = self.get_valid_len(); self.entries[0].set_len(valid_length); + // SAFETY: // Safe because the length has been validated. unsafe { self.entries[0].get_mut_slice(valid_length) } } diff --git a/cros_async/src/blocking/sys/linux/block_on.rs b/cros_async/src/blocking/sys/linux/block_on.rs index abb420a3ee..e89a75dd85 100644 --- a/cros_async/src/blocking/sys/linux/block_on.rs +++ b/cros_async/src/blocking/sys/linux/block_on.rs @@ -30,6 +30,7 @@ impl ArcWake for Waker { fn wake_by_ref(arc_self: &Arc) { let state = arc_self.0.swap(WOKEN, Ordering::Release); if state == WAITING { + // SAFETY: // The thread hasn't already been woken up so wake it up now. Safe because this doesn't // modify any memory and we check the return value. let res = unsafe { @@ -71,6 +72,7 @@ pub fn block_on(f: F) -> F::Output { let state = thread_waker.0.swap(WAITING, Ordering::Acquire); if state == WAITING { + // SAFETY: // If we weren't already woken up then wait until we are. Safe because this doesn't // modify any memory and we check the return value. 
let res = unsafe { diff --git a/cros_async/src/mem.rs b/cros_async/src/mem.rs index c7e2b314d9..ec9bb5a912 100644 --- a/cros_async/src/mem.rs +++ b/cros_async/src/mem.rs @@ -201,6 +201,7 @@ impl VecIoWrapper { } } +// SAFETY: // Safe to implement BackingMemory as the vec is only accessible inside the wrapper and these iovecs // are the only thing allowed to modify it. Nothing else can get a reference to the vec until all // iovecs are dropped because they borrow Self. Nothing can borrow the owned inner vec until self @@ -208,6 +209,7 @@ impl VecIoWrapper { unsafe impl BackingMemory for VecIoWrapper { fn get_volatile_slice(&self, mem_range: MemRegion) -> Result> { self.check_addrs(&mem_range)?; + // SAFETY: // Safe because the mem_range range is valid in the backing memory as checked above. unsafe { Ok(VolatileSlice::from_raw_parts( diff --git a/cros_async/src/sync/cv.rs b/cros_async/src/sync/cv.rs index 77db210823..e579f175d2 100644 --- a/cros_async/src/sync/cv.rs +++ b/cros_async/src/sync/cv.rs @@ -189,6 +189,7 @@ impl Condvar { oldstate = self.state.load(Ordering::Relaxed); } + // SAFETY: // Safe because the spin lock guarantees exclusive access and the reference does not escape // this function. let mu = unsafe { &mut *self.mu.get() }; @@ -200,6 +201,7 @@ impl Condvar { _ => panic!("Attempting to use Condvar with more than one RwLock at the same time"), } + // SAFETY: // Safe because the spin lock guarantees exclusive access. unsafe { (*self.waiters.get()).push_back(waiter) }; @@ -241,12 +243,14 @@ impl Condvar { oldstate = self.state.load(Ordering::Relaxed); } + // SAFETY: // Safe because the spin lock guarantees exclusive access and the reference does not escape // this function. let waiters = unsafe { &mut *self.waiters.get() }; let wake_list = get_wake_list(waiters); let newstate = if waiters.is_empty() { + // SAFETY: // Also clear the rwlock associated with this Condvar since there are no longer any // waiters. 
Safe because the spin lock guarantees exclusive access. unsafe { *self.mu.get() = 0 }; @@ -299,9 +303,11 @@ impl Condvar { oldstate = self.state.load(Ordering::Relaxed); } + // SAFETY: // Safe because the spin lock guarantees exclusive access to `self.waiters`. let wake_list = unsafe { (*self.waiters.get()).take() }; + // SAFETY: // Clear the rwlock associated with this Condvar since there are no longer any waiters. Safe // because we the spin lock guarantees exclusive access. unsafe { *self.mu.get() = 0 }; @@ -337,6 +343,7 @@ impl Condvar { oldstate = self.state.load(Ordering::Relaxed); } + // SAFETY: // Safe because the spin lock provides exclusive access and the reference does not escape // this function. let waiters = unsafe { &mut *self.waiters.get() }; @@ -344,6 +351,7 @@ impl Condvar { let waiting_for = waiter.is_waiting_for(); // Don't drop the old waiter now as we're still holding the spin lock. let old_waiter = if waiter.is_linked() && waiting_for == WaitingFor::Condvar { + // SAFETY: // Safe because we know that the waiter is still linked and is waiting for the Condvar, // which guarantees that it is still in `self.waiters`. let mut cursor = unsafe { waiters.cursor_mut_from_ptr(waiter as *const Waiter) }; @@ -361,6 +369,7 @@ impl Condvar { }; let set_on_release = if waiters.is_empty() { + // SAFETY: // Clear the rwlock associated with this Condvar since there are no longer any waiters. Safe // because we the spin lock guarantees exclusive access. 
unsafe { *self.mu.get() = 0 }; @@ -381,7 +390,11 @@ impl Condvar { } } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Send for Condvar {} +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Sync for Condvar {} impl Default for Condvar { @@ -446,6 +459,7 @@ fn get_wake_list(waiters: &mut WaiterList) -> WaiterList { fn cancel_waiter(cv: usize, waiter: &Waiter, wake_next: bool) { let condvar = cv as *const Condvar; + // SAFETY: // Safe because the thread that owns the waiter being canceled must also own a reference to the // Condvar, which guarantees that this pointer is valid. unsafe { (*condvar).cancel_waiter(waiter, wake_next) } @@ -640,6 +654,7 @@ mod test { while *count == 0 { count = cv.wait_read(count).await; } + // SAFETY: Safe because count is valid and is byte aligned. let _ = unsafe { ptr::read_volatile(&*count as *const usize) }; } diff --git a/cros_async/src/sync/mu.rs b/cros_async/src/sync/mu.rs index b3aa508aad..596c4630e8 100644 --- a/cros_async/src/sync/mu.rs +++ b/cros_async/src/sync/mu.rs @@ -305,15 +305,19 @@ impl RawRwLock { { let mut set_on_release = 0; - // Safe because we have acquired the spin lock and it provides exclusive - // access to the waiter queue. if wait_count < LONG_WAIT_THRESHOLD { // Add the waiter to the back of the queue. + // SAFETY: + // Safe because we have acquired the spin lock and it provides exclusive + // access to the waiter queue. unsafe { (*self.waiters.get()).push_back(w.clone()) }; } else { // This waiter has gone through the queue too many times. Put it in the // front of the queue and block all other threads from acquiring the lock // until this one has acquired it at least once. + // SAFETY: + // Safe because we have acquired the spin lock and it provides exclusive + // access to the waiter queue. 
unsafe { (*self.waiters.get()).push_front(w.clone()) }; // Set the LONG_WAIT bit to prevent all other threads from acquiring the @@ -459,6 +463,7 @@ impl RawRwLock { // to be cleared. let mut clear = SPINLOCK; + // SAFETY: // Safe because the spinlock guarantees exclusive access to the waiter list and // the reference does not escape this function. let waiters = unsafe { &mut *self.waiters.get() }; @@ -530,6 +535,7 @@ impl RawRwLock { oldstate = self.state.load(Ordering::Relaxed); } + // SAFETY: // Safe because the spin lock provides exclusive access and the reference does not escape // this function. let waiters = unsafe { &mut *self.waiters.get() }; @@ -557,6 +563,7 @@ impl RawRwLock { // Don't drop the old waiter while holding the spin lock. let old_waiter = if waiter.is_linked() && waiting_for == WaitingFor::Mutex { + // SAFETY: // We know that the waiter is still linked and is waiting for the rwlock, which // guarantees that it is still linked into `self.waiters`. let mut cursor = unsafe { waiters.cursor_mut_from_ptr(waiter as *const Waiter) }; @@ -613,12 +620,17 @@ impl RawRwLock { } } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Send for RawRwLock {} +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Sync for RawRwLock {} fn cancel_waiter(raw: usize, waiter: &Waiter, wake_next: bool) { let raw_rwlock = raw as *const RawRwLock; + // SAFETY: // Safe because the thread that owns the waiter that is being canceled must also own a reference // to the rwlock, which ensures that this pointer is valid. unsafe { (*raw_rwlock).cancel_waiter(waiter, wake_next) } @@ -728,9 +740,10 @@ impl RwLock { pub async fn lock(&self) -> RwLockWriteGuard<'_, T> { self.raw.lock().await; - // Safe because we have exclusive access to `self.value`. RwLockWriteGuard { mu: self, + // SAFETY: + // Safe because we have exclusive access to `self.value`. 
value: unsafe { &mut *self.value.get() }, } } @@ -750,9 +763,10 @@ impl RwLock { pub async fn read_lock(&self) -> RwLockReadGuard<'_, T> { self.raw.read_lock().await; - // Safe because we have shared read-only access to `self.value`. RwLockReadGuard { mu: self, + // SAFETY: + // Safe because we have shared read-only access to `self.value`. value: unsafe { &*self.value.get() }, } } @@ -762,9 +776,10 @@ impl RwLock { pub(crate) async fn lock_from_cv(&self) -> RwLockWriteGuard<'_, T> { self.raw.lock_slow::(DESIGNATED_WAKER, 0).await; - // Safe because we have exclusive access to `self.value`. RwLockWriteGuard { mu: self, + // SAFETY: + // Safe because we have exclusive access to `self.value`. value: unsafe { &mut *self.value.get() }, } } @@ -778,9 +793,10 @@ impl RwLock { .lock_slow::(DESIGNATED_WAKER, WRITER_WAITING) .await; - // Safe because we have exclusive access to `self.value`. RwLockReadGuard { mu: self, + // SAFETY: + // Safe because we have exclusive access to `self.value`. value: unsafe { &*self.value.get() }, } } @@ -796,13 +812,18 @@ impl RwLock { } pub fn get_mut(&mut self) -> &mut T { + // SAFETY: // Safe because the compiler statically guarantees that are no other references to `self`. // This is also why we don't need to acquire the lock first. 
unsafe { &mut *self.value.get() } } } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Send for RwLock {} +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Sync for RwLock {} impl Default for RwLock { diff --git a/cros_async/src/sync/spin.rs b/cros_async/src/sync/spin.rs index cb066ecb49..9af83a13df 100644 --- a/cros_async/src/sync/spin.rs +++ b/cros_async/src/sync/spin.rs @@ -70,6 +70,8 @@ impl SpinLock { hint::spin_loop(); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] SpinLockGuard { lock: self, value: unsafe { &mut *self.value.get() }, @@ -84,13 +86,18 @@ impl SpinLock { /// Returns a mutable reference to the contained value. This method doesn't perform any locking /// as the compiler will statically guarantee that there are no other references to `self`. pub fn get_mut(&mut self) -> &mut T { + // SAFETY: // Safe because the compiler can statically guarantee that there are no other references to // `self`. This is also why we don't need to acquire the lock. unsafe { &mut *self.value.get() } } } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Send for SpinLock {} +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Sync for SpinLock {} impl Default for SpinLock { diff --git a/cros_async/src/sync/waiter.rs b/cros_async/src/sync/waiter.rs index 1c04448f38..2479df21dc 100644 --- a/cros_async/src/sync/waiter.rs +++ b/cros_async/src/sync/waiter.rs @@ -52,15 +52,19 @@ impl DefaultLinkOps for AtomicLink { const NEW: Self::Ops = AtomicLinkOps; } +// SAFETY: // Safe because the only way to mutate `AtomicLink` is via the `LinkedListOps` trait whose methods // are all unsafe and require that the caller has first called `acquire_link` (and had it return // true) to use them safely. 
unsafe impl Send for AtomicLink {} +// SAFETY: See safety comment for impl Send unsafe impl Sync for AtomicLink {} #[derive(Copy, Clone, Default)] pub struct AtomicLinkOps; +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl LinkOps for AtomicLinkOps { type LinkPtr = NonNull; @@ -73,6 +77,8 @@ unsafe impl LinkOps for AtomicLinkOps { } } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl LinkedListOps for AtomicLinkOps { unsafe fn next(&self, ptr: Self::LinkPtr) -> Option { *ptr.as_ref().next.get() diff --git a/cros_async/src/sys/linux/poll_source.rs b/cros_async/src/sys/linux/poll_source.rs index a522301846..c3dd309131 100644 --- a/cros_async/src/sys/linux/poll_source.rs +++ b/cros_async/src/sys/linux/poll_source.rs @@ -108,8 +108,9 @@ impl PollSource { mut vec: Vec, ) -> AsyncResult<(usize, Vec)> { loop { - // Safe because this will only modify `vec` and we check the return value. let res = if let Some(offset) = file_offset { + // SAFETY: + // Safe because this will only modify `vec` and we check the return value. unsafe { libc::pread64( self.registered_source.duped_fd.as_raw_fd(), @@ -119,6 +120,8 @@ impl PollSource { ) } } else { + // SAFETY: + // Safe because this will only modify `vec` and we check the return value. unsafe { libc::read( self.registered_source.duped_fd.as_raw_fd(), @@ -158,9 +161,10 @@ impl PollSource { .collect::>(); loop { - // Safe because we trust the kernel not to write path the length given and the length is - // guaranteed to be valid from the pointer by io_slice_mut. let res = if let Some(offset) = file_offset { + // SAFETY: + // Safe because we trust the kernel not to write path the length given and the length is + // guaranteed to be valid from the pointer by io_slice_mut. 
unsafe { libc::preadv64( self.registered_source.duped_fd.as_raw_fd(), @@ -170,6 +174,9 @@ impl PollSource { ) } } else { + // SAFETY: + // Safe because we trust the kernel not to write path the length given and the length is + // guaranteed to be valid from the pointer by io_slice_mut. unsafe { libc::readv( self.registered_source.duped_fd.as_raw_fd(), @@ -213,8 +220,9 @@ impl PollSource { vec: Vec, ) -> AsyncResult<(usize, Vec)> { loop { - // Safe because this will not modify any memory and we check the return value. let res = if let Some(offset) = file_offset { + // SAFETY: + // Safe because this will not modify any memory and we check the return value. unsafe { libc::pwrite64( self.registered_source.duped_fd.as_raw_fd(), @@ -224,6 +232,8 @@ impl PollSource { ) } } else { + // SAFETY: + // Safe because this will not modify any memory and we check the return value. unsafe { libc::write( self.registered_source.duped_fd.as_raw_fd(), @@ -264,9 +274,10 @@ impl PollSource { .collect::>(); loop { - // Safe because we trust the kernel not to write path the length given and the length is - // guaranteed to be valid from the pointer by io_slice_mut. let res = if let Some(offset) = file_offset { + // SAFETY: + // Safe because we trust the kernel not to write path the length given and the length is + // guaranteed to be valid from the pointer by io_slice_mut. unsafe { libc::pwritev64( self.registered_source.duped_fd.as_raw_fd(), @@ -276,6 +287,9 @@ impl PollSource { ) } } else { + // SAFETY: + // Safe because we trust the kernel not to write path the length given and the length is + // guaranteed to be valid from the pointer by io_slice_mut. unsafe { libc::writev( self.registered_source.duped_fd.as_raw_fd(), @@ -302,8 +316,11 @@ impl PollSource { } } + /// # Safety + /// /// Sync all completed write operations to the backing storage. pub async fn fsync(&self) -> AsyncResult<()> { + // SAFETY: the duped_fd is valid and return value is checked. 
let ret = unsafe { libc::fsync(self.registered_source.duped_fd.as_raw_fd()) }; if ret == 0 { Ok(()) @@ -344,6 +361,7 @@ impl PollSource { /// Sync all data of completed write operations to the backing storage, avoiding updating extra /// metadata. pub async fn fdatasync(&self) -> AsyncResult<()> { + // SAFETY: the duped_fd is valid and return value is checked. let ret = unsafe { libc::fdatasync(self.registered_source.duped_fd.as_raw_fd()) }; if ret == 0 { Ok(()) diff --git a/cros_async/src/sys/linux/uring_executor.rs b/cros_async/src/sys/linux/uring_executor.rs index c84f6a3a21..c5a91d308e 100644 --- a/cros_async/src/sys/linux/uring_executor.rs +++ b/cros_async/src/sys/linux/uring_executor.rs @@ -158,15 +158,18 @@ impl From for io::Error { static IS_URING_STABLE: Lazy = Lazy::new(|| { let mut utsname = MaybeUninit::zeroed(); + // SAFETY: // Safe because this will only modify `utsname` and we check the return value. let res = unsafe { libc::uname(utsname.as_mut_ptr()) }; if res < 0 { return false; } + // SAFETY: // Safe because the kernel has initialized `utsname`. let utsname = unsafe { utsname.assume_init() }; + // SAFETY: // Safe because the pointer is valid and the kernel guarantees that this is a valid C string. let release = unsafe { CStr::from_ptr(utsname.release.as_ptr()) }; @@ -423,11 +426,10 @@ impl UringReactor { raw: &Arc>, fd: &F, ) -> Result { - let duped_fd = unsafe { - // Safe because duplicating an FD doesn't affect memory safety, and the dup'd FD - // will only be added to the poll loop. - File::from_raw_fd(dup_fd(fd.as_raw_descriptor())?) - }; + // SAFETY: + // Safe because duplicating an FD doesn't affect memory safety, and the dup'd FD + // will only be added to the poll loop. + let duped_fd = unsafe { File::from_raw_fd(dup_fd(fd.as_raw_descriptor())?) 
}; Ok(RegisteredSource { tag: self @@ -555,6 +557,7 @@ impl UringReactor { let vslice = mem .get_volatile_slice(mem_range) .map_err(|_| Error::InvalidOffset)?; + // SAFETY: // Safe because we guarantee that the memory pointed to by `iovecs` lives until the // transaction is complete and the completion has been returned from `wait()`. Ok(unsafe { IoBufMut::from_raw_parts(vslice.as_mut_ptr(), vslice.size()) }) @@ -572,10 +575,11 @@ impl UringReactor { let entry = ring.ops.vacant_entry(); let next_op_token = entry.key(); + // SAFETY: + // Safe because all the addresses are within the Memory that an Arc is kept for the + // duration to ensure the memory is valid while the kernel accesses it. + // Tested by `dont_drop_backing_mem_read` unit test. unsafe { - // Safe because all the addresses are within the Memory that an Arc is kept for the - // duration to ensure the memory is valid while the kernel accesses it. - // Tested by `dont_drop_backing_mem_read` unit test. self.ctx .add_readv( iovecs, @@ -609,6 +613,7 @@ impl UringReactor { let vslice = mem .get_volatile_slice(mem_range) .map_err(|_| Error::InvalidOffset)?; + // SAFETY: // Safe because we guarantee that the memory pointed to by `iovecs` lives until the // transaction is complete and the completion has been returned from `wait()`. Ok(unsafe { IoBufMut::from_raw_parts(vslice.as_mut_ptr(), vslice.size()) }) @@ -626,10 +631,11 @@ impl UringReactor { let entry = ring.ops.vacant_entry(); let next_op_token = entry.key(); + // SAFETY: + // Safe because all the addresses are within the Memory that an Arc is kept for the + // duration to ensure the memory is valid while the kernel accesses it. + // Tested by `dont_drop_backing_mem_write` unit test. unsafe { - // Safe because all the addresses are within the Memory that an Arc is kept for the - // duration to ensure the memory is valid while the kernel accesses it. - // Tested by `dont_drop_backing_mem_write` unit test. 
self.ctx .add_writev( iovecs, @@ -800,6 +806,7 @@ impl Drop for UringReactor { } } +// SAFETY: // Used to dup the FDs passed to the executor so there is a guarantee they aren't closed while // waiting in TLS to be added to the main polling context. unsafe fn dup_fd(fd: RawFd) -> Result { diff --git a/cros_async/src/sys/windows/event.rs b/cros_async/src/sys/windows/event.rs index c1384a0d8f..73c04a55b6 100644 --- a/cros_async/src/sys/windows/event.rs +++ b/cros_async/src/sys/windows/event.rs @@ -36,13 +36,14 @@ impl EventAsync { descriptor: &dyn AsRawDescriptor, ex: &Executor, ) -> AsyncResult { - // Safe because: - // a) the underlying Event should be validated by the caller. - // b) we do NOT take ownership of the underlying Event. If we did that would cause an early - // free (and later a double free @ the end of this scope). This is why we have to wrap - // it in ManuallyDrop. - // c) we own the clone that is produced exclusively, so it is safe to take ownership of it. Self::new_without_reset( + // SAFETY: + // Safe because: + // a) the underlying Event should be validated by the caller. + // b) we do NOT take ownership of the underlying Event. If we did that would cause an early + // free (and later a double free @ the end of this scope). This is why we have to wrap + // it in ManuallyDrop. + // c) we own the clone that is produced exclusively, so it is safe to take ownership of it. unsafe { ManuallyDrop::new(Event::from_raw_descriptor(descriptor.as_raw_descriptor())) } diff --git a/cros_async/src/sys/windows/handle_source.rs b/cros_async/src/sys/windows/handle_source.rs index 470cc1e4a7..ab9a038dfc 100644 --- a/cros_async/src/sys/windows/handle_source.rs +++ b/cros_async/src/sys/windows/handle_source.rs @@ -99,6 +99,7 @@ impl HandleWrapper { pub fn cancel_sync_io(&mut self, ret: T) -> T { for handle in &self.handles { // There isn't much we can do if cancel fails. 
+ // SAFETY: trivially safe if unsafe { CancelIoEx(handle.as_raw_descriptor(), null_mut()) } == 0 { warn!( "Cancel IO for handle:{:?} failed with {}", @@ -188,6 +189,7 @@ impl Drop for HandleSource { } fn get_thread_file(descriptors: Vec) -> ManuallyDrop { + // SAFETY: trivially safe // Safe because all callers must exit *before* these handles will be closed (guaranteed by // HandleSource's Drop impl.). unsafe { diff --git a/cros_async/src/sys/windows/io_completion_port.rs b/cros_async/src/sys/windows/io_completion_port.rs index 4eca315ff1..cd6e05e2eb 100644 --- a/cros_async/src/sys/windows/io_completion_port.rs +++ b/cros_async/src/sys/windows/io_completion_port.rs @@ -57,7 +57,7 @@ struct Port { inner: RawDescriptor, } -// # Safety +// SAFETY: // Safe because the Port is dropped before IoCompletionPort goes out of scope unsafe impl Send for Port {} @@ -90,8 +90,10 @@ unsafe fn get_completion_status( ) -> io::Result { let mut bytes_transferred = 0; let mut completion_key = 0; + // SAFETY: trivially safe let mut overlapped: *mut OVERLAPPED = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because: // 1. Memory of pointers passed is stack allocated and lives as long as the syscall. // 2. We check the error so we don't use invalid output values (e.g. overlapped). @@ -133,7 +135,7 @@ unsafe fn get_completion_status( unsafe fn poll(port: RawDescriptor) -> Result> { let mut completion_packets = vec![]; completion_packets.push( - // Safety: caller has ensured that the handle is valid and is for io completion port + // SAFETY: caller has ensured that the handle is valid and is for io completion port unsafe { get_completion_status(port, INFINITE) .map_err(|e| Error::IocpOperationFailed(SysError::from(e)))? @@ -146,8 +148,9 @@ unsafe fn poll(port: RawDescriptor) -> Result> { // get detailed error information for each of the returned overlapped IO operations without // calling GetOverlappedResult. 
If we have to do that, then it's cheaper to just get each // completion packet individually. - // Safety: caller has ensured that the handle is valid and is for io completion port while completion_packets.len() < ENTRIES_PER_POLL { + // SAFETY: + // Safety: caller has ensured that the handle is valid and is for io completion port match unsafe { get_completion_status(port, 0) } { Ok(pkt) => { completion_packets.push(pkt); @@ -168,7 +171,7 @@ fn iocp_waiter_thread( ) -> Result<()> { let port = port.lock(); loop { - // Safety: caller has ensured that the handle is valid and is for io completion port + // SAFETY: caller has ensured that the handle is valid and is for io completion port let packets = unsafe { poll(port.inner)? }; if !packets.is_empty() { { @@ -265,6 +268,7 @@ impl IoCompletionPort { /// Posts a completion packet to the IO completion port. pub fn post_status(&self, bytes_transferred: u32, completion_key: usize) -> Result<()> { + // SAFETY: // Safe because the IOCP handle is valid. let res = unsafe { PostQueuedCompletionStatus( @@ -296,11 +300,12 @@ impl IoCompletionPort { let mut overlapped_entries: SmallVec<[OVERLAPPED_ENTRY; ENTRIES_PER_POLL]> = smallvec!(OVERLAPPED_ENTRY::default(); ENTRIES_PER_POLL); + let mut entries_removed: ULONG = 0; + // SAFETY: // Safe because: // 1. IOCP is guaranteed to exist by self. // 2. Memory of pointers passed is stack allocated and lives as long as the syscall. // 3. We check the error so we don't use invalid output values (e.g. overlapped). - let mut entries_removed: ULONG = 0; let success = unsafe { GetQueuedCompletionStatusEx( self.port.as_raw_descriptor(), @@ -352,7 +357,7 @@ impl IoCompletionPort { /// Waits for completion events to arrive & returns the completion keys. pub fn poll_unthreaded(&self) -> Result> { - // Safety: safe because port is in scope for the duration of the call. + // SAFETY: safe because port is in scope for the duration of the call. let packets = unsafe { poll(self.port.as_raw_descriptor())? 
}; let mut completion_packets = SmallVec::with_capacity(ENTRIES_PER_POLL); for pkt in packets { @@ -398,6 +403,7 @@ impl IoCompletionPort { } let mut bytes_transferred = 0; + // SAFETY: trivially safe with return value checked let success = unsafe { GetOverlappedResult( entry.lpCompletionKey as RawDescriptor, @@ -442,10 +448,11 @@ fn create_iocp( None => null_mut(), }; - // Safe because: - // 1. The file handle is open because we have a reference to it. - // 2. The existing IOCP (if applicable) is valid. let port = + // SAFETY: + // Safe because: + // 1. The file handle is open because we have a reference to it. + // 2. The existing IOCP (if applicable) is valid. unsafe { CreateIoCompletionPort(raw_file, raw_existing_iocp, completion_key, concurrency) }; if port.is_null() { @@ -455,6 +462,7 @@ fn create_iocp( if existing_iocp.is_some() { Ok(None) } else { + // SAFETY: // Safe because: // 1. We are creating a new IOCP. // 2. We exclusively own the handle. @@ -502,6 +510,8 @@ mod tests { iocp.register_descriptor(&f).unwrap(); let buf = [0u8; 16]; + // SAFETY: Safe given file is valid, buffers are allocated and initialized and return value + // is checked. unsafe { base::windows::write_file(&f, buf.as_ptr(), buf.len(), Some(&mut overlapped)).unwrap() }; @@ -526,6 +536,8 @@ mod tests { iocp.register_descriptor(&f).unwrap(); let buf = [0u8; 16]; + // SAFETY: Safe given file is valid, buffers are allocated and initialized and return value + // is checked. unsafe { base::windows::write_file(&f, buf.as_ptr(), buf.len(), Some(&mut overlapped)).unwrap() }; diff --git a/cros_async/src/sys/windows/overlapped_source.rs b/cros_async/src/sys/windows/overlapped_source.rs index 10525160a4..802beb9470 100644 --- a/cros_async/src/sys/windows/overlapped_source.rs +++ b/cros_async/src/sys/windows/overlapped_source.rs @@ -106,6 +106,7 @@ impl OverlappedSource { } } +/// SAFETY: /// Safety requirements: /// Same as base::windows::read_file. 
unsafe fn read( @@ -119,6 +120,7 @@ unsafe fn read( .map_err(|e| AsyncError::OverlappedSource(Error::StdIoReadError(e))) } +/// SAFETY: /// Safety requirements: /// Same as base::windows::write_file. unsafe fn write( @@ -147,6 +149,7 @@ impl OverlappedSource { let overlapped = create_overlapped(file_offset, None); let mut overlapped_op = self.reg_source.register_overlapped_operation(overlapped)?; + // SAFETY: // Safe because we pass a pointer to a valid vec and that same vector's length. unsafe { read( @@ -192,6 +195,7 @@ impl OverlappedSource { AsyncError::OverlappedSource(Error::BackingMemoryVolatileSliceFetchFailed(e)) })?; + // SAFETY: // Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to. unsafe { read( @@ -235,6 +239,7 @@ impl OverlappedSource { let overlapped = create_overlapped(file_offset, None); let mut overlapped_op = self.reg_source.register_overlapped_operation(overlapped)?; + // SAFETY: // Safe because we pass a pointer to a valid vec and that same vector's length. unsafe { write( @@ -281,6 +286,7 @@ impl OverlappedSource { AsyncError::OverlappedSource(Error::BackingMemoryVolatileSliceFetchFailed(e)) })?; + // SAFETY: // Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to. unsafe { write( @@ -313,6 +319,7 @@ impl OverlappedSource { ), ))); } + // SAFETY: // Safe because self.source lives as long as file. let file = ManuallyDrop::new(unsafe { File::from_raw_descriptor(self.source.as_raw_descriptor()) @@ -335,6 +342,7 @@ impl OverlappedSource { ), ))); } + // SAFETY: // Safe because self.source lives as long as file. let mut file = ManuallyDrop::new(unsafe { File::from_raw_descriptor(self.source.as_raw_descriptor()) @@ -348,6 +356,7 @@ impl OverlappedSource { /// Sync all completed write operations to the backing storage. 
pub async fn fsync(&self) -> AsyncResult<()> { + // SAFETY: // Safe because self.source lives at least as long as the blocking pool thread. Note that // if the blocking pool stalls and shutdown fails, the thread could outlive the file; // however, this would mean things are already badly broken and we have a similar risk in diff --git a/cros_async/src/sys/windows/wait_for_handle.rs b/cros_async/src/sys/windows/wait_for_handle.rs index 01aed58e2b..eed7e008e1 100644 --- a/cros_async/src/sys/windows/wait_for_handle.rs +++ b/cros_async/src/sys/windows/wait_for_handle.rs @@ -90,6 +90,7 @@ where let mut inner = self.inner.lock(); match inner.wait_state { WaitState::New => { + // SAFETY: // Safe because: // a) the callback only runs when WaitForHandle is alive (we cancel it on // drop). @@ -128,6 +129,7 @@ where WaitState::Woken => { inner.wait_state = WaitState::Finished; + // SAFETY: // Safe because: // a) we know a wait was registered and hasn't been unregistered yet. // b) the callback is not queued because we set WT_EXECUTEONLYONCE, and we know @@ -161,13 +163,14 @@ where (inner.wait_state, inner.wait_object) }; - // Safe because self.descriptor is valid in any state except New or Finished. - // - // Note: this method call is critical for supplying the safety guarantee relied upon by - // wait_for_handle_waker. Upon return, it ensures that wait_for_handle_waker is not running - // and won't be scheduled again, which makes it safe to drop self.inner_for_callback - // (wait_for_handle_waker has a non owning pointer to self.inner_for_callback). if current_state != WaitState::New && current_state != WaitState::Finished { + // SAFETY: + // Safe because self.descriptor is valid in any state except New or Finished. + // + // Note: this method call is critical for supplying the safety guarantee relied upon by + // wait_for_handle_waker. 
Upon return, it ensures that wait_for_handle_waker is not running + // and won't be scheduled again, which makes it safe to drop self.inner_for_callback + // (wait_for_handle_waker has a non owning pointer to self.inner_for_callback). unsafe { unregister_wait(wait_object) } } } diff --git a/cros_async/src/waker.rs b/cros_async/src/waker.rs index 71e6f9553b..a7889ba63f 100644 --- a/cros_async/src/waker.rs +++ b/cros_async/src/waker.rs @@ -62,6 +62,8 @@ unsafe fn drop_weak_raw(data: *const ()) { } pub(crate) fn new_waker(w: Weak) -> Waker { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { Waker::from_raw(RawWaker::new( w.into_raw() as *const (), diff --git a/cros_tracing_types/src/static_strings.rs b/cros_tracing_types/src/static_strings.rs index b59d4dc4bd..73c3b3bd79 100644 --- a/cros_tracing_types/src/static_strings.rs +++ b/cros_tracing_types/src/static_strings.rs @@ -46,6 +46,7 @@ impl StaticString { // Safety: pointers are safe to send between threads. unsafe impl Send for StaticString {} +// SAFETY: // Safe to share across threads, because `register` is protected by a lock and strings inserted // are never removed. 
unsafe impl Sync for StaticString {} diff --git a/crosvm_control/src/lib.rs b/crosvm_control/src/lib.rs index c870e0d828..08616fd718 100644 --- a/crosvm_control/src/lib.rs +++ b/crosvm_control/src/lib.rs @@ -460,6 +460,7 @@ pub unsafe extern "C" fn crosvm_client_usb_attach( if let Ok(UsbControlResult::Ok { port }) = do_usb_attach(socket_path, dev_path) { if !out_port.is_null() { + // SAFETY: trivially safe unsafe { *out_port = port }; } true @@ -595,8 +596,11 @@ pub unsafe extern "C" fn crosvm_client_modify_battery( if battery_type.is_null() || property.is_null() || target.is_null() { return false; } + // SAFETY: trivially safe let battery_type = unsafe { CStr::from_ptr(battery_type) }; + // SAFETY: trivially safe let property = unsafe { CStr::from_ptr(property) }; + // SAFETY: trivially safe let target = unsafe { CStr::from_ptr(target) }; do_modify_battery( diff --git a/crosvm_plugin/src/lib.rs b/crosvm_plugin/src/lib.rs index 9a36b9b6a6..7e7cf032d1 100644 --- a/crosvm_plugin/src/lib.rs +++ b/crosvm_plugin/src/lib.rs @@ -162,6 +162,7 @@ fn proto_error_to_int(e: protobuf::Error) -> c_int { } fn fd_cast(f: File) -> F { + // SAFETY: // Safe because we are transferring unique ownership. unsafe { F::from_raw_fd(f.into_raw_fd()) } } @@ -533,14 +534,20 @@ impl crosvm { match route.kind { CROSVM_IRQ_ROUTE_IRQCHIP => { let irqchip = entry.mut_irqchip(); + // SAFETY: // Safe because route.kind indicates which union field is valid. irqchip.irqchip = unsafe { route.route.irqchip }.irqchip; + // SAFETY: + // Safe because route.kind indicates which union field is valid. irqchip.pin = unsafe { route.route.irqchip }.pin; } CROSVM_IRQ_ROUTE_MSI => { let msi = entry.mut_msi(); + // SAFETY: // Safe because route.kind indicates which union field is valid. msi.address = unsafe { route.route.msi }.address; + // SAFETY: + // Safe because route.kind indicates which union field is valid. 
msi.data = unsafe { route.route.msi }.data; } _ => return Err(EINVAL), diff --git a/devices/src/irqchip/kvm/aarch64.rs b/devices/src/irqchip/kvm/aarch64.rs index ab0c7d4f69..f88d99b0f8 100644 --- a/devices/src/irqchip/kvm/aarch64.rs +++ b/devices/src/irqchip/kvm/aarch64.rs @@ -105,12 +105,14 @@ impl KvmKernelIrqChip { }; dist_attr.attr = dist_attr_attr; + // SAFETY: // Safe because we allocated the struct that's being passed in let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &cpu_redist_attr) }; if ret != 0 { return errno_result(); } + // SAFETY: // Safe because we allocated the struct that's being passed in let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &dist_attr) }; if ret != 0 { @@ -126,6 +128,7 @@ impl KvmKernelIrqChip { addr: nr_irqs_ptr as u64, flags: 0, }; + // SAFETY: // Safe because we allocated the struct that's being passed in let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &nr_irqs_attr) }; if ret != 0 { @@ -178,6 +181,7 @@ impl IrqChipAArch64 for KvmKernelIrqChip { flags: 0, }; + // SAFETY: // Safe because we allocated the struct that's being passed in let ret = unsafe { ioctl_with_ref(&self.vgic, KVM_SET_DEVICE_ATTR(), &init_gic_attr) }; if ret != 0 { diff --git a/devices/src/pci/coiommu.rs b/devices/src/pci/coiommu.rs index b37176717b..2db44e8d7d 100644 --- a/devices/src/pci/coiommu.rs +++ b/devices/src/pci/coiommu.rs @@ -361,22 +361,25 @@ fn gfn_to_dtt_pte( mem.get_host_address(GuestAddress(pt_gpa + index)) .context(Error::GetDTTEntry)? 
- } else { + } else if gfn > dtt_iter.gfn { + // SAFETY: // Safe because we checked that dtt_iter.ptr is valid and that the dtt_pte // for gfn lies on the same dtt page as the dtt_pte for dtt_iter.gfn, which // means the calculated ptr will point to the same page as dtt_iter.ptr - if gfn > dtt_iter.gfn { - unsafe { - dtt_iter - .ptr - .add(mem::size_of::() * (gfn - dtt_iter.gfn) as usize) - } - } else { - unsafe { - dtt_iter - .ptr - .sub(mem::size_of::() * (dtt_iter.gfn - gfn) as usize) - } + unsafe { + dtt_iter + .ptr + .add(mem::size_of::() * (gfn - dtt_iter.gfn) as usize) + } + } else { + // SAFETY: + // Safe because we checked that dtt_iter.ptr is valid and that the dtt_pte + // for gfn lies on the same dtt page as the dtt_pte for dtt_iter.gfn, which + // means the calculated ptr will point to the same page as dtt_iter.ptr + unsafe { + dtt_iter + .ptr + .sub(mem::size_of::() * (dtt_iter.gfn - gfn) as usize) } }; @@ -403,6 +406,7 @@ fn pin_page( .get_host_address_range(GuestAddress(gpa), PAGE_SIZE_4K as usize) .context("failed to get host address")? as u64; + // SAFETY: // Safe because ptr is valid and guaranteed by the gfn_to_dtt_pte. // Test PINNED flag if (unsafe { (*leaf_entry).load(Ordering::Relaxed) } & DTTE_PINNED_FLAG) != 0 { @@ -410,9 +414,11 @@ fn pin_page( return Ok(()); } + // SAFETY: // Safe because the gpa is valid from the gfn_to_dtt_pte and the host_addr // is guaranteed by MemoryMapping interface. if unsafe { vfio_map(vfio_container, gpa, PAGE_SIZE_4K, host_addr) } { + // SAFETY: // Safe because ptr is valid and guaranteed by the gfn_to_dtt_pte. // set PINNED flag unsafe { (*leaf_entry).fetch_or(DTTE_PINNED_FLAG, Ordering::SeqCst) }; @@ -467,6 +473,7 @@ fn unpin_page( }; if force { + // SAFETY: // Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte. 
// This case is for balloon to evict pages so these pages should // already been locked by balloon and no device driver in VM is @@ -475,6 +482,7 @@ fn unpin_page( unsafe { (*leaf_entry).fetch_and(!DTTE_ACCESSED_FLAG, Ordering::SeqCst) }; } + // SAFETY: // Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte. if let Err(entry) = unsafe { (*leaf_entry).compare_exchange(DTTE_PINNED_FLAG, 0, Ordering::SeqCst, Ordering::SeqCst) @@ -488,6 +496,7 @@ fn unpin_page( UnpinResult::NotPinned } else { if !force { + // SAFETY: // Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte. // The ACCESSED_FLAG is set by the guest if guest requires DMA map for // this page. It represents whether or not this page is touched by the @@ -526,6 +535,7 @@ fn unpin_page( if vfio_unmap(vfio_container, gpa, PAGE_SIZE_4K) { UnpinResult::Unpinned } else { + // SAFETY: // Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte. // make sure the pinned flag is set unsafe { (*leaf_entry).fetch_or(DTTE_PINNED_FLAG, Ordering::SeqCst) }; diff --git a/devices/src/platform/vfio_platform.rs b/devices/src/platform/vfio_platform.rs index 9386db09d7..723640dfc1 100644 --- a/devices/src/platform/vfio_platform.rs +++ b/devices/src/platform/vfio_platform.rs @@ -272,6 +272,7 @@ impl VfioPlatformDevice { Err(_e) => break, }; let host = mmap.as_ptr() as u64; + // SAFETY: // Safe because the given guest_map_start is valid guest bar address. and // the host pointer is correct and valid guaranteed by MemoryMapping interface. match unsafe { diff --git a/devices/src/proxy.rs b/devices/src/proxy.rs index 27a0dfb5b4..f44678c949 100644 --- a/devices/src/proxy.rs +++ b/devices/src/proxy.rs @@ -291,6 +291,7 @@ impl ChildProcIntf { if let Some(swap_device_uffd_sender) = swap_device_uffd_sender { if let Err(e) = swap_device_uffd_sender.on_process_forked() { error!("failed to SwapController::on_process_forked: {:?}", e); + // SAFETY: // exit() is trivially safe. 
unsafe { libc::exit(1) }; } @@ -305,6 +306,7 @@ impl ChildProcIntf { // TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly // defined. // + // SAFETY: // exit() is trivially safe. // ! Never returns unsafe { libc::exit(0) }; diff --git a/devices/src/serial/sys/windows.rs b/devices/src/serial/sys/windows.rs index dcf258e589..e4f8c6ea93 100644 --- a/devices/src/serial/sys/windows.rs +++ b/devices/src/serial/sys/windows.rs @@ -256,6 +256,8 @@ mod tests { ) .unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { // Check that serial output is sent to the pipe device.write(serial_bus_address(DATA), &[b'T']); diff --git a/devices/src/tsc.rs b/devices/src/tsc.rs index 41082df41a..76168d8edd 100644 --- a/devices/src/tsc.rs +++ b/devices/src/tsc.rs @@ -23,6 +23,7 @@ pub use calibrate::*; pub use cpuid::*; fn rdtsc_safe() -> u64 { + // SAFETY: // Safe because _rdtsc takes no arguments unsafe { _rdtsc() } } diff --git a/devices/src/tsc/calibrate.rs b/devices/src/tsc/calibrate.rs index 64ce56cc86..ce8b0da4dd 100644 --- a/devices/src/tsc/calibrate.rs +++ b/devices/src/tsc/calibrate.rs @@ -450,6 +450,7 @@ mod tests { } fn rdtsc_frequency_higher_than_u32() -> u64 { + // SAFETY: trivially safe unsafe { _rdtsc() }.wrapping_mul(1000) } @@ -471,6 +472,7 @@ mod tests { fn test_offset_identification_core_0() { fn rdtsc_with_core_0_offset_by_100_000() -> u64 { let mut id = 0u32; + // SAFETY: trivially safe let mut value = unsafe { __rdtscp(&mut id as *mut u32) }; if id == 0 { value += 100_000; @@ -513,6 +515,7 @@ mod tests { fn test_offset_identification_core_1() { fn rdtsc_with_core_1_offset_by_100_000() -> u64 { let mut id = 0u32; + // SAFETY: trivially safe let mut value = unsafe { __rdtscp(&mut id as *mut u32) }; if id == 1 { value += 100_000; diff --git a/devices/src/tsc/cpuid.rs b/devices/src/tsc/cpuid.rs index a6f7fce2c5..a7366acfc8 100644 --- a/devices/src/tsc/cpuid.rs +++ b/devices/src/tsc/cpuid.rs 
@@ -17,6 +17,7 @@ pub type CpuidCountFn = unsafe fn(u32, u32) -> CpuidResult; /// combination. `std::arch::x86_64::__cpuid_count` may be used to provide the CPUID information /// from the host. pub fn tsc_frequency_cpuid(cpuid_count: CpuidCountFn) -> Option { + // SAFETY: // Safe because we pass 0 and 0 for this call and the host supports the `cpuid` instruction. let result = unsafe { cpuid_count(0, 0) }; if result.eax < 0x15 { @@ -35,6 +36,7 @@ pub fn tsc_frequency_cpuid(cpuid_count: CpuidCountFn) -> Option Option Result { + // SAFETY: // Safe as file is vfio container descriptor and ioctl is defined by kernel. let version = unsafe { ioctl(&container, VFIO_GET_API_VERSION()) }; if version as u8 != VFIO_API_VERSION { @@ -362,12 +366,14 @@ impl VfioContainer { } fn check_extension(&self, val: IommuType) -> bool { + // SAFETY: // Safe as file is vfio container and make sure val is valid. let ret = unsafe { ioctl_with_val(self, VFIO_CHECK_EXTENSION(), val as c_ulong) }; ret != 0 } fn set_iommu(&mut self, val: IommuType) -> i32 { + // SAFETY: // Safe as file is vfio container and make sure val is valid. 
unsafe { ioctl_with_val(self, VFIO_SET_IOMMU(), val as c_ulong) } } @@ -455,6 +461,7 @@ impl VfioContainer { ..Default::default() }; + // SAFETY: // Safe as file is vfio container, dma_unmap is constructed by us, and // we check the return value let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_UNMAP_DMA(), &mut dma_unmap) }; @@ -485,6 +492,7 @@ impl VfioContainer { ..Default::default() }; + // SAFETY: // Safe as file is vfio container, iommu_info has valid values, // and we check the return value let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_GET_INFO(), &mut iommu_info) }; @@ -516,6 +524,7 @@ impl VfioContainer { ..Default::default() }; + // SAFETY: // Safe as file is vfio container, iommu_info_argsz has valid values, // and we check the return value let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_GET_INFO(), &mut iommu_info_argsz) }; @@ -531,14 +540,16 @@ impl VfioContainer { iommu_info_argsz.argsz as usize - mem::size_of::(), ); iommu_info[0].argsz = iommu_info_argsz.argsz; - // Safe as file is vfio container, iommu_info has valid values, - // and we check the return value let ret = + // SAFETY: + // Safe as file is vfio container, iommu_info has valid values, + // and we check the return value unsafe { ioctl_with_mut_ptr(self, VFIO_IOMMU_GET_INFO(), iommu_info.as_mut_ptr()) }; if ret != 0 { return Err(VfioError::IommuGetInfo(get_error())); } + // SAFETY: // Safe because we initialized iommu_info with enough space, u8 has less strict // alignment, and since it will no longer be mutated. 
let info_bytes = unsafe { @@ -622,6 +633,7 @@ impl VfioContainer { IommuDevType::CoIommu | IommuDevType::PkvmPviommu | IommuDevType::VirtioIommu => {} IommuDevType::NoIommu => { for region in vm.get_memory().regions() { + // SAFETY: // Safe because the guest regions are guaranteed not to overlap unsafe { self.vfio_dma_map( @@ -691,6 +703,8 @@ impl VfioContainer { } pub fn clone_as_raw_descriptor(&self) -> Result { + // SAFETY: this call is safe because it doesn't modify any memory and we + // check the return value. let raw_descriptor = unsafe { libc::dup(self.container.as_raw_descriptor()) }; if raw_descriptor < 0 { Err(VfioError::ContainerDupError) @@ -729,8 +743,9 @@ impl VfioGroup { argsz: mem::size_of::() as u32, flags: 0, }; - // Safe as we are the owner of group_file and group_status which are valid value. let mut ret = + // SAFETY: + // Safe as we are the owner of group_file and group_status which are valid value. unsafe { ioctl_with_mut_ref(&group_file, VFIO_GROUP_GET_STATUS(), &mut group_status) }; if ret < 0 { return Err(VfioError::GetGroupStatus(get_error())); @@ -740,9 +755,10 @@ impl VfioGroup { return Err(VfioError::GroupViable); } + let container_raw_descriptor = container.as_raw_descriptor(); + // SAFETY: // Safe as we are the owner of group_file and container_raw_descriptor which are valid value, // and we verify the ret value - let container_raw_descriptor = container.as_raw_descriptor(); ret = unsafe { ioctl_with_ref( &group_file, @@ -796,6 +812,7 @@ impl VfioGroup { }, }; + // SAFETY: // Safe as we are the owner of vfio_dev_descriptor and vfio_dev_attr which are valid value, // and we verify the return value. if 0 != unsafe { @@ -815,12 +832,14 @@ impl VfioGroup { let path: CString = CString::new(name.as_bytes()).expect("CString::new() failed"); let path_ptr = path.as_ptr(); + // SAFETY: // Safe as we are the owner of self and path_ptr which are valid value. 
let ret = unsafe { ioctl_with_ptr(self, VFIO_GROUP_GET_DEVICE_FD(), path_ptr) }; if ret < 0 { return Err(VfioError::GroupGetDeviceFD(get_error())); } + // SAFETY: // Safe as ret is valid descriptor Ok(unsafe { File::from_raw_descriptor(ret) }) } @@ -1177,6 +1196,7 @@ impl VfioDevice { let mut device_feature = vec_with_array_field::(0); device_feature[0].argsz = mem::size_of::() as u32; device_feature[0].flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY; + // SAFETY: // Safe as we are the owner of self and power_management which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) }; if ret < 0 { @@ -1197,8 +1217,9 @@ impl VfioDevice { device_feature[0].argsz = (mem::size_of::() + payload_size) as u32; device_feature[0].flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP; + // SAFETY: + // Safe as we know vfio_device_low_power_entry_with_wakeup has two 32-bit int fields unsafe { - // Safe as we know vfio_device_low_power_entry_with_wakeup has two 32-bit int fields device_feature[0] .data .as_mut_slice(payload_size) @@ -1207,6 +1228,7 @@ impl VfioDevice { .as_slice(), ); } + // SAFETY: // Safe as we are the owner of self and power_management which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) }; if ret < 0 { @@ -1221,6 +1243,7 @@ impl VfioDevice { let mut device_feature = vec_with_array_field::(0); device_feature[0].argsz = mem::size_of::() as u32; device_feature[0].flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_EXIT; + // SAFETY: // Safe as we are the owner of self and power_management which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) }; if ret < 0 { @@ -1236,15 +1259,18 @@ impl VfioDevice { let mut dsm = vec_with_array_field::(count); dsm[0].argsz = (mem::size_of::() + mem::size_of_val(args)) as u32; dsm[0].padding = 0; + // SAFETY: 
// Safe as we allocated enough space to hold args unsafe { dsm[0].args.as_mut_slice(count).clone_from_slice(args); } + // SAFETY: // Safe as we are the owner of self and dsm which are valid value let ret = unsafe { ioctl_with_mut_ref(&self.dev, VFIO_DEVICE_ACPI_DSM(), &mut dsm[0]) }; if ret < 0 { Err(VfioError::VfioAcpiDsm(get_error())) } else { + // SAFETY: // Safe as we allocated enough space to hold args let res = unsafe { dsm[0].args.as_slice(count) }; Ok(res.to_vec()) @@ -1267,10 +1293,12 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = count as u32; + // SAFETY: // It is safe as enough space is reserved through vec_with_array_field(u32). let data = unsafe { irq_set[0].data.as_mut_slice(count * u32_size) }; data.copy_from_slice(&acpi_notification_eventfd.as_raw_descriptor().to_ne_bytes()[..]); + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1289,6 +1317,7 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = 0; + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1310,10 +1339,12 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = 1; + // SAFETY: // It is safe as enough space is reserved through vec_with_array_field(u32). 
let data = unsafe { irq_set[0].data.as_mut_slice(u32_size) }; data.copy_from_slice(&val.to_ne_bytes()[..]); + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1345,6 +1376,7 @@ impl VfioDevice { irq_set[0].start = subindex; irq_set[0].count = count as u32; + // SAFETY: // irq_set.data could be none, bool or descriptor according to flags, so irq_set.data // is u8 default, here irq_set.data is descriptor as u32, so 4 default u8 are combined // together as u32. It is safe as enough space is reserved through @@ -1359,6 +1391,7 @@ impl VfioDevice { data = right; } + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1386,6 +1419,7 @@ impl VfioDevice { irq_set[0].count = 1; { + // SAFETY: // irq_set.data could be none, bool or descriptor according to flags, so irq_set.data is // u8 default, here irq_set.data is descriptor as u32, so 4 default u8 are combined // together as u32. 
It is safe as enough space is reserved through @@ -1394,6 +1428,7 @@ impl VfioDevice { descriptors.copy_from_slice(&descriptor.as_raw_descriptor().to_le_bytes()[..]); } + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1412,6 +1447,7 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = 0; + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1430,6 +1466,7 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = 1; + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1448,6 +1485,7 @@ impl VfioDevice { irq_set[0].start = 0; irq_set[0].count = 1; + // SAFETY: // Safe as we are the owner of self and irq_set which are valid value let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) }; if ret < 0 { @@ -1467,6 +1505,7 @@ impl VfioDevice { ..Default::default() }; + // SAFETY: // Safe as we are the owner of device_file and dev_info which are valid value, // and we verify the return value. let ret = unsafe { ioctl_with_mut_ref(device_file, VFIO_DEVICE_GET_INFO(), &mut dev_info) }; @@ -1504,6 +1543,7 @@ impl VfioDevice { index: i, count: 0, }; + // SAFETY: // Safe as we are the owner of dev and irq_info which are valid value, // and we verify the return value. let ret = unsafe { @@ -1539,9 +1579,10 @@ impl VfioDevice { size: 0, offset: 0, }; - // Safe as we are the owner of dev and reg_info which are valid value, - // and we verify the return value. let ret = + // SAFETY: + // Safe as we are the owner of dev and reg_info which are valid value, + // and we verify the return value. 
unsafe { ioctl_with_mut_ref(dev, VFIO_DEVICE_GET_REGION_INFO(), &mut reg_info) }; if ret < 0 { continue; @@ -1559,6 +1600,7 @@ impl VfioDevice { region_with_cap[0].region_info.cap_offset = 0; region_with_cap[0].region_info.size = 0; region_with_cap[0].region_info.offset = 0; + // SAFETY: // Safe as we are the owner of dev and region_info which are valid value, // and we verify the return value. let ret = unsafe { @@ -1593,27 +1635,33 @@ impl VfioDevice { if offset + cap_header_sz > region_info_sz { break; } + // SAFETY: // Safe, as cap_header struct is in this function allocated region_with_cap // vec. let cap_ptr = unsafe { info_ptr.offset(offset as isize) }; + // SAFETY: + // Safe, as cap_header struct is in this function allocated region_with_cap + // vec. let cap_header = unsafe { &*(cap_ptr as *const vfio_info_cap_header) }; if cap_header.id as u32 == VFIO_REGION_INFO_CAP_SPARSE_MMAP { if offset + mmap_cap_sz > region_info_sz { break; } // cap_ptr is vfio_region_info_cap_sparse_mmap here - // Safe, this vfio_region_info_cap_sparse_mmap is in this function allocated - // region_with_cap vec. let sparse_mmap = + // SAFETY: + // Safe, this vfio_region_info_cap_sparse_mmap is in this function + // allocated region_with_cap vec. unsafe { &*(cap_ptr as *const vfio_region_info_cap_sparse_mmap) }; let area_num = sparse_mmap.nr_areas; if offset + mmap_cap_sz + area_num * mmap_area_sz > region_info_sz { break; } - // Safe, these vfio_region_sparse_mmap_area are in this function allocated - // region_with_cap vec. let areas = + // SAFETY: + // Safe, these vfio_region_sparse_mmap_area are in this function allocated + // region_with_cap vec. 
unsafe { sparse_mmap.areas.as_slice(sparse_mmap.nr_areas as usize) }; for area in areas.iter() { mmaps.push(*area); @@ -1623,9 +1671,10 @@ impl VfioDevice { break; } // cap_ptr is vfio_region_info_cap_type here - // Safe, this vfio_region_info_cap_type is in this function allocated - // region_with_cap vec let cap_type_info = + // SAFETY: + // Safe, this vfio_region_info_cap_type is in this function allocated + // region_with_cap vec unsafe { &*(cap_ptr as *const vfio_region_info_cap_type) }; cap_info = Some((cap_type_info.type_, cap_type_info.subtype)); @@ -1776,10 +1825,12 @@ impl VfioDevice { /// Reads a value from the specified `VfioRegionAddr.addr` + `offset`. pub fn region_read_from_addr(&self, addr: &VfioRegionAddr, offset: u64) -> T { let mut val = mem::MaybeUninit::zeroed(); - // Safe because we have zero-initialized `size_of::()` bytes. let buf = + // SAFETY: + // Safe because we have zero-initialized `size_of::()` bytes. unsafe { slice::from_raw_parts_mut(val.as_mut_ptr() as *mut u8, mem::size_of::()) }; self.region_read(addr.index, buf, addr.addr + offset); + // SAFETY: // Safe because any bit pattern is valid for a type that implements FromBytes. unsafe { val.assume_init() } } diff --git a/devices/src/virtio/descriptor_utils.rs b/devices/src/virtio/descriptor_utils.rs index 8cf603039b..e067ac4891 100644 --- a/devices/src/virtio/descriptor_utils.rs +++ b/devices/src/virtio/descriptor_utils.rs @@ -738,6 +738,7 @@ impl io::Write for Writer { } let count = cmp::min(rem.len(), b.size()); + // SAFETY: // Safe because we have already verified that `vs` points to valid memory. unsafe { copy_nonoverlapping(rem.as_ptr(), b.as_mut_ptr(), count); diff --git a/devices/src/virtio/fs/caps.rs b/devices/src/virtio/fs/caps.rs index 8846d71757..ac7e26b435 100644 --- a/devices/src/virtio/fs/caps.rs +++ b/devices/src/virtio/fs/caps.rs @@ -113,6 +113,7 @@ pub struct Caps(cap_t); impl Caps { /// Get the capabilities for the current thread. 
pub fn for_current_thread() -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let caps = unsafe { cap_get_proc() }; if caps.is_null() { @@ -124,6 +125,7 @@ impl Caps { /// Update the capabilities described by `self` by setting or clearing `caps` in `set`. pub fn update(&mut self, caps: &[Capability], set: Set, value: Value) -> io::Result<()> { + // SAFETY: // Safe because this only modifies the memory pointed to by `self.0` and we check the return // value. let ret = unsafe { @@ -146,6 +148,7 @@ impl Caps { /// Apply the capabilities described by `self` to the current thread. pub fn apply(&self) -> io::Result<()> { + // SAFETY: trivially safe if unsafe { cap_set_proc(self.0) } == 0 { Ok(()) } else { @@ -156,6 +159,7 @@ impl Caps { impl Drop for Caps { fn drop(&mut self) { + // SAFETY: cap_t is allocated from `Self` unsafe { cap_free(self.0); } diff --git a/devices/src/virtio/fs/passthrough.rs b/devices/src/virtio/fs/passthrough.rs index cb624fa034..d24c23d9a9 100644 --- a/devices/src/virtio/fs/passthrough.rs +++ b/devices/src/virtio/fs/passthrough.rs @@ -301,6 +301,7 @@ macro_rules! scoped_cred { impl Drop for $name { fn drop(&mut self) { + // SAFETY: trivially safe let res = unsafe { libc::syscall($syscall_nr, -1, self.old, -1) }; if res < 0 { error!( @@ -337,6 +338,8 @@ thread_local! { // SAFETY: both calls take no parameters and only return an integer value. The kernel also // guarantees that they can never fail. static THREAD_EUID: libc::uid_t = unsafe { libc::syscall(SYS_GETEUID) as libc::uid_t }; + // SAFETY: both calls take no parameters and only return an integer value. The kernel also + // guarantees that they can never fail. 
static THREAD_EGID: libc::gid_t = unsafe { libc::syscall(SYS_GETEGID) as libc::gid_t }; } @@ -1106,14 +1109,18 @@ impl PassthroughFs { ) -> io::Result<(Option, OpenOptions)> { let open_flags = self.update_open_flags(flags as i32); - let fd_open = syscall!(unsafe { - libc::openat64( - parent_data.as_raw_descriptor(), - name.as_ptr(), - (open_flags | libc::O_CLOEXEC) & !(libc::O_NOFOLLOW | libc::O_DIRECT), - ) - })?; + let fd_open = syscall!( + // SAFETY: return value is checked. + unsafe { + libc::openat64( + parent_data.as_raw_descriptor(), + name.as_ptr(), + (open_flags | libc::O_CLOEXEC) & !(libc::O_NOFOLLOW | libc::O_DIRECT), + ) + } + )?; + // SAFETY: fd_open is valid let file_open = unsafe { File::from_raw_descriptor(fd_open) }; let handle = self.next_handle.fetch_add(1, Ordering::Relaxed); let data = HandleData { @@ -1265,8 +1272,8 @@ impl PassthroughFs { let policy_size = cmp::min(arg.policy_size, size_of::() as u64); arg.policy_size = policy_size; - // SAFETY: the kernel will only write to `arg` and we check the return value. let res = + // SAFETY: the kernel will only write to `arg` and we check the return value. unsafe { ioctl_with_mut_ptr(&*data, FS_IOC_GET_ENCRYPTION_POLICY_EX(), &mut arg) }; if res < 0 { Ok(IoctlReply::Done(Err(io::Error::last_os_error()))) @@ -1594,9 +1601,9 @@ impl PassthroughFs { if res < 0 { Ok(IoctlReply::Done(Err(io::Error::last_os_error()))) } else { - // SAFETY: this value was initialized by us already and then overwritten by the kernel. - // TODO: Replace with `MaybeUninit::slice_as_ptr` once it is stabilized. let digest_size = + // SAFETY: this value was initialized by us already and then overwritten by the kernel. + // TODO: Replace with `MaybeUninit::slice_as_ptr` once it is stabilized. 
unsafe { addr_of!((*(buf.as_ptr() as *const fsverity_digest)).digest_size).read() }; let outlen = size_of::() as u32 + u32::from(digest_size); @@ -1608,16 +1615,16 @@ impl PassthroughFs { )))); } - // SAFETY: any bit pattern is valid for `MaybeUninit` and `fsverity_digest` doesn't - // contain any references. let buf: [MaybeUninit; ROUNDED_LEN * size_of::()] = + // SAFETY: any bit pattern is valid for `MaybeUninit` and `fsverity_digest` + // doesn't contain any references. unsafe { mem::transmute(buf) }; - // SAFETY: Casting to `*const [u8]` is safe because the kernel guarantees that the first - // `outlen` bytes of `buf` are initialized and `MaybeUninit` is guaranteed to have - // the same layout as `u8`. - // TODO: Replace with `MaybeUninit::slice_assume_init_ref` once it is stabilized. let buf = + // SAFETY: Casting to `*const [u8]` is safe because the kernel guarantees that the + // first `outlen` bytes of `buf` are initialized and `MaybeUninit` is guaranteed + // to have the same layout as `u8`. + // TODO: Replace with `MaybeUninit::slice_assume_init_ref` once it is stabilized. unsafe { &*(&buf[..outlen as usize] as *const [MaybeUninit] as *const [u8]) }; Ok(IoctlReply::Done(Ok(buf.to_vec()))) } @@ -2301,12 +2308,15 @@ impl FileSystem for PassthroughFs { } if valid.contains(SetattrValid::SIZE) { - // SAFETY: this doesn't modify any memory and we check the return value. syscall!(match data { - Data::Handle(_, fd) => unsafe { libc::ftruncate64(fd, attr.st_size) }, + Data::Handle(_, fd) => { + // SAFETY: this doesn't modify any memory and we check the return value. + unsafe { libc::ftruncate64(fd, attr.st_size) } + } _ => { // There is no `ftruncateat` so we need to get a new fd and truncate it. let f = self.open_inode(&inode_data, libc::O_NONBLOCK | libc::O_RDWR)?; + // SAFETY: this doesn't modify any memory and we check the return value. 
unsafe { libc::ftruncate64(f.as_raw_descriptor(), attr.st_size) } } })?; @@ -2542,6 +2552,7 @@ impl FileSystem for PassthroughFs { self.find_handle(handle, inode)? }; + // SAFETY: // Since this method is called whenever an fd is closed in the client, we can emulate that // behavior by doing the same thing (dup-ing the fd and then immediately closing it). Safe // because this doesn't modify any memory and we check the return values. @@ -2664,8 +2675,8 @@ impl FileSystem for PassthroughFs { let path = CString::new(format!("self/fd/{}", file.0.as_raw_descriptor())) .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; - // SAFETY: this doesn't modify any memory and we check the return value. syscall!(self.with_proc_chdir(|| { + // SAFETY: this doesn't modify any memory and we check the return value. unsafe { libc::setxattr( path.as_ptr(), @@ -2677,17 +2688,19 @@ impl FileSystem for PassthroughFs { } }))?; } else { - // For regular files and directories, we can just use fsetxattr. - // SAFETY: this doesn't modify any memory and we check the return value. - syscall!(unsafe { - libc::fsetxattr( - file.0.as_raw_descriptor(), - name.as_ptr(), - value.as_ptr() as *const libc::c_void, - value.len() as libc::size_t, - flags as c_int, - ) - })?; + syscall!( + // For regular files and directories, we can just use fsetxattr. + // SAFETY: this doesn't modify any memory and we check the return value. + unsafe { + libc::fsetxattr( + file.0.as_raw_descriptor(), + name.as_ptr(), + value.as_ptr() as *const libc::c_void, + value.len() as libc::size_t, + flags as c_int, + ) + } + )?; } Ok(()) @@ -2788,14 +2801,15 @@ impl FileSystem for PassthroughFs { let path = CString::new(format!("self/fd/{}", file.0.as_raw_descriptor())) .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; - // SAFETY: this doesn't modify any memory and we check the return value. 
- syscall!( - self.with_proc_chdir(|| unsafe { libc::removexattr(path.as_ptr(), name.as_ptr()) }) - )?; + syscall!(self.with_proc_chdir(|| + // SAFETY: this doesn't modify any memory and we check the return value. + unsafe { libc::removexattr(path.as_ptr(), name.as_ptr()) }))?; } else { // For regular files and directories, we can just use fremovexattr. - // SAFETY: this doesn't modify any memory and we check the return value. - syscall!(unsafe { libc::fremovexattr(file.0.as_raw_descriptor(), name.as_ptr()) })?; + syscall!( + // SAFETY: this doesn't modify any memory and we check the return value. + unsafe { libc::fremovexattr(file.0.as_raw_descriptor(), name.as_ptr()) } + )?; } Ok(()) @@ -2967,17 +2981,21 @@ impl FileSystem for PassthroughFs { let src = src_data.as_raw_descriptor(); let dst = dst_data.as_raw_descriptor(); - Ok(syscall!(unsafe { - libc::syscall( - libc::SYS_copy_file_range, - src, - &offset_src, - dst, - &offset_dst, - length, - flags, - ) - })? as usize) + Ok(syscall!( + // SAFETY: this call is safe because it doesn't modify any memory and we + // check the return value. + unsafe { + libc::syscall( + libc::SYS_copy_file_range, + src, + &offset_src, + dst, + &offset_dst, + length, + flags, + ) + } + )? as usize) } fn set_up_mapping( @@ -3137,6 +3155,8 @@ mod tests { // SAFETY: both calls take no parameters and only return an integer value. The kernel also // guarantees that they can never fail. let uid = unsafe { libc::syscall(SYS_GETEUID) as libc::uid_t }; + // SAFETY: both calls take no parameters and only return an integer value. The kernel also + // guarantees that they can never fail. let gid = unsafe { libc::syscall(SYS_GETEGID) as libc::gid_t }; let pid = std::process::id() as libc::pid_t; Context { uid, gid, pid } @@ -3265,18 +3285,23 @@ mod tests { let p = PassthroughFs::new("tag", cfg).expect("Failed to create PassthroughFs"); // Selinux shouldn't get overwritten. 
+ // SAFETY: trivially safe let selinux = unsafe { CStr::from_bytes_with_nul_unchecked(b"security.selinux\0") }; assert_eq!(p.rewrite_xattr_name(selinux).to_bytes(), selinux.to_bytes()); // user, trusted, and system should not be changed either. + // SAFETY: trivially safe let user = unsafe { CStr::from_bytes_with_nul_unchecked(b"user.foobar\0") }; assert_eq!(p.rewrite_xattr_name(user).to_bytes(), user.to_bytes()); + // SAFETY: trivially safe let trusted = unsafe { CStr::from_bytes_with_nul_unchecked(b"trusted.foobar\0") }; assert_eq!(p.rewrite_xattr_name(trusted).to_bytes(), trusted.to_bytes()); + // SAFETY: trivially safe let system = unsafe { CStr::from_bytes_with_nul_unchecked(b"system.foobar\0") }; assert_eq!(p.rewrite_xattr_name(system).to_bytes(), system.to_bytes()); // sehash should be re-written. + // SAFETY: trivially safe let sehash = unsafe { CStr::from_bytes_with_nul_unchecked(b"security.sehash\0") }; assert_eq!( p.rewrite_xattr_name(sehash).to_bytes(), diff --git a/devices/src/virtio/fs/read_dir.rs b/devices/src/virtio/fs/read_dir.rs index 7e98959e8e..d210932d34 100644 --- a/devices/src/virtio/fs/read_dir.rs +++ b/devices/src/virtio/fs/read_dir.rs @@ -32,12 +32,14 @@ pub struct ReadDir

{ impl> ReadDir

{ pub fn new(dir: &D, offset: libc::off64_t, mut buf: P) -> io::Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let res = unsafe { libc::lseek64(dir.as_raw_descriptor(), offset, libc::SEEK_SET) }; if res < 0 { return Err(io::Error::last_os_error()); } + // SAFETY: // Safe because the kernel guarantees that it will only write to `buf` and we check the // return value. let res = unsafe { @@ -117,6 +119,7 @@ fn strip_padding(b: &[u8]) -> &CStr { .position(|&c| c == 0) .expect("`b` doesn't contain any nul bytes"); + // SAFETY: // Safe because we are creating this string with the first nul-byte we found so we can // guarantee that it is nul-terminated and doesn't contain any interior nuls. unsafe { CStr::from_bytes_with_nul_unchecked(&b[..pos + 1]) } diff --git a/devices/src/virtio/fs/worker.rs b/devices/src/virtio/fs/worker.rs index 14551d1b0a..9fcea76b4f 100644 --- a/devices/src/virtio/fs/worker.rs +++ b/devices/src/virtio/fs/worker.rs @@ -188,20 +188,27 @@ impl Worker { // cases. const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2; - // Safe because this doesn't modify any memory and we check the return value. - let mut securebits = syscall!(unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }) - .map_err(Error::GetSecurebits)?; + let mut securebits = syscall!( + // SAFETY: Safe because this doesn't modify any memory and we check the return value. + unsafe { libc::prctl(libc::PR_GET_SECUREBITS) } + ) + .map_err(Error::GetSecurebits)?; securebits |= SECBIT_NO_SETUID_FIXUP; - // Safe because this doesn't modify any memory and we check the return value. - syscall!(unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }) - .map_err(Error::SetSecurebits)?; + syscall!( + // SAFETY: Safe because this doesn't modify any memory and we check the return value. + unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) } + ) + .map_err(Error::SetSecurebits)?; // To avoid extra locking, unshare filesystem attributes from parent. 
This includes the // current working directory and umask. - // Safe because this doesn't modify any memory and we check the return value. - syscall!(unsafe { libc::unshare(libc::CLONE_FS) }).map_err(Error::UnshareFromParent)?; + syscall!( + // SAFETY: Safe because this doesn't modify any memory and we check the return value. + unsafe { libc::unshare(libc::CLONE_FS) } + ) + .map_err(Error::UnshareFromParent)?; #[derive(EventToken)] enum Token { diff --git a/devices/src/virtio/gpu/protocol.rs b/devices/src/virtio/gpu/protocol.rs index 31f6869400..9ad80229e7 100644 --- a/devices/src/virtio/gpu/protocol.rs +++ b/devices/src/virtio/gpu/protocol.rs @@ -355,6 +355,7 @@ pub struct virtio_gpu_ctx_create { impl Default for virtio_gpu_ctx_create { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } diff --git a/devices/src/virtio/gpu/virtio_gpu.rs b/devices/src/virtio/gpu/virtio_gpu.rs index c0d00dfdc7..9457508795 100644 --- a/devices/src/virtio/gpu/virtio_gpu.rs +++ b/devices/src/virtio/gpu/virtio_gpu.rs @@ -71,11 +71,13 @@ use crate::virtio::resource_bridge::ResourceResponse; use crate::virtio::SharedMemoryMapper; pub fn to_rutabaga_descriptor(s: SafeDescriptor) -> RutabagaDescriptor { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { RutabagaDescriptor::from_raw_descriptor(s.into_raw_descriptor()) } } fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor { + // SAFETY: // Safe because we own the SafeDescriptor at this point. 
unsafe { SafeDescriptor::from_raw_descriptor(r.into_raw_descriptor()) } } @@ -357,9 +359,10 @@ impl VirtioGpuScanout { let mut transfer = Transfer3D::new_2d(0, 0, self.width, self.height); transfer.stride = fb.stride(); let fb_slice = fb.as_volatile_slice(); - let buf = IoSliceMut::new(unsafe { - std::slice::from_raw_parts_mut(fb_slice.as_mut_ptr(), fb_slice.size()) - }); + let buf = IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(fb_slice.as_mut_ptr(), fb_slice.size()) }, + ); rutabaga.transfer_read(0, resource.resource_id, transfer, Some(buf))?; display.flip(surface_id); @@ -930,7 +933,10 @@ impl VirtioGpu { buf: Option, ) -> VirtioGpuResult { let buf = buf.map(|vs| { - IoSliceMut::new(unsafe { std::slice::from_raw_parts_mut(vs.as_mut_ptr(), vs.size()) }) + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.as_mut_ptr(), vs.size()) }, + ) }); self.rutabaga .transfer_read(ctx_id, resource_id, transfer, buf)?; diff --git a/devices/src/virtio/input/evdev.rs b/devices/src/virtio/input/evdev.rs index 5eaf2d5efb..04a3de065b 100644 --- a/devices/src/virtio/input/evdev.rs +++ b/devices/src/virtio/input/evdev.rs @@ -115,10 +115,11 @@ fn errno() -> base::Error { /// Gets id information from an event device (see EVIOCGID ioctl for details). 
pub fn device_ids(descriptor: &T) -> Result { let mut dev_id = evdev_id::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_id and we check the return // value - ioctl_with_mut_ref(descriptor, EVIOCGID(), &mut dev_id) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGID(), &mut dev_id) } }; if len < 0 { return Err(InputError::EvdevIdError(errno())); @@ -134,10 +135,11 @@ pub fn device_ids(descriptor: &T) -> Result(descriptor: &T) -> Result> { let mut name = evdev_buffer::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_buffer and we check the // return value - ioctl_with_mut_ref(descriptor, EVIOCGNAME(), &mut name) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGNAME(), &mut name) } }; if len < 0 { return Err(InputError::EvdevNameError(errno())); @@ -148,10 +150,11 @@ pub fn name(descriptor: &T) -> Result> { /// Gets the unique (serial) name of an event device (see EVIOCGUNIQ ioctl for details). pub fn serial_name(descriptor: &T) -> Result> { let mut uniq = evdev_buffer::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_buffer and we check the // return value - ioctl_with_mut_ref(descriptor, EVIOCGUNIQ(), &mut uniq) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGUNIQ(), &mut uniq) } }; if len < 0 { return Err(InputError::EvdevSerialError(errno())); @@ -162,10 +165,11 @@ pub fn serial_name(descriptor: &T) -> Result> { /// Gets the properties of an event device (see EVIOCGPROP ioctl for details). 
pub fn properties(descriptor: &T) -> Result { let mut props = evdev_buffer::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_buffer and we check the // return value - ioctl_with_mut_ref(descriptor, EVIOCGPROP(), &mut props) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGPROP(), &mut props) } }; if len < 0 { return Err(InputError::EvdevPropertiesError(errno())); @@ -181,10 +185,11 @@ pub fn supported_events( let mut evts: BTreeMap = BTreeMap::new(); let mut evt_types = evdev_buffer::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_buffer and we check the // return value - ioctl_with_mut_ref(descriptor, EVIOCGBIT(0), &mut evt_types) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGBIT(0), &mut evt_types) } }; if len < 0 { return Err(InputError::EvdevEventTypesError(errno())); @@ -198,10 +203,11 @@ pub fn supported_events( } // Create a new zero-filled buffer every time to avoid carry-overs. 
let mut evt_codes = evdev_buffer::new(); - let len = unsafe { + let len = { + // SAFETY: // Safe because the kernel won't write more than size of evdev_buffer and we check the // return value - ioctl_with_mut_ref(descriptor, EVIOCGBIT(ev as c_uint), &mut evt_codes) + unsafe { ioctl_with_mut_ref(descriptor, EVIOCGBIT(ev as c_uint), &mut evt_codes) } }; if len < 0 { return Err(InputError::EvdevEventTypesError(errno())); @@ -218,10 +224,11 @@ pub fn abs_info(descriptor: &T) -> BTreeMap(descriptor: &T) -> BTreeMap(descriptor: &mut T) -> Result<()> { let val: u32 = 1; - let ret = unsafe { + let ret = { + // SAFETY: // Safe because the kernel only read the value of the ptr and we check the return value - ioctl_with_ref(descriptor, EVIOCGRAB(), &val) + unsafe { ioctl_with_ref(descriptor, EVIOCGRAB(), &val) } }; if ret == 0 { Ok(()) @@ -247,10 +255,11 @@ pub fn grab_evdev(descriptor: &mut T) -> Result<()> { } pub fn ungrab_evdev(descriptor: &mut T) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Safe because the kernel only reads the value of the ptr (doesn't dereference) and // we check the return value - ioctl_with_ptr(descriptor, EVIOCGRAB(), null::()) + unsafe { ioctl_with_ptr(descriptor, EVIOCGRAB(), null::()) } }; if ret == 0 { Ok(()) diff --git a/devices/src/virtio/iommu.rs b/devices/src/virtio/iommu.rs index 8978ce60b9..bd51aa6528 100644 --- a/devices/src/virtio/iommu.rs +++ b/devices/src/virtio/iommu.rs @@ -410,6 +410,7 @@ impl State { }; let vfio_map_result = match dmabuf_map { + // SAFETY: // Safe because [dmabuf_map, dmabuf_map + size) refers to an external mmap'ed region. 
Some(dmabuf_map) => unsafe { mapper diff --git a/devices/src/virtio/iommu/sys/linux/vfio_wrapper.rs b/devices/src/virtio/iommu/sys/linux/vfio_wrapper.rs index b14e9fdaf9..6b21ef4ae5 100644 --- a/devices/src/virtio/iommu/sys/linux/vfio_wrapper.rs +++ b/devices/src/virtio/iommu/sys/linux/vfio_wrapper.rs @@ -78,6 +78,7 @@ impl MemoryMapper for VfioWrapper { .context("failed to find host address")? as u64, ); + // SAFETY: // Safe because both guest and host address are guaranteed by // get_host_address_range() to be valid. unsafe { self.do_map(map) } diff --git a/devices/src/virtio/pvclock.rs b/devices/src/virtio/pvclock.rs index 0eb8809038..c8c7de1284 100644 --- a/devices/src/virtio/pvclock.rs +++ b/devices/src/virtio/pvclock.rs @@ -394,10 +394,11 @@ impl PvClockWorker { warn!("Suspend time already set, ignoring new suspend time"); return; } - // Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify any other - // memory. self.suspend_time = Some(PvclockInstant { time: Utc::now(), + // SAFETY: + // Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify any other + // memory. tsc_value: unsafe { _rdtsc() }, }); } @@ -448,10 +449,11 @@ impl PvClockWorker { fn set_suspended_time(&mut self) -> Result<()> { let (this_suspend_duration, this_suspend_tsc_delta) = if let Some(suspend_time) = self.suspend_time.take() { - // Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify any - // other memory. ( Self::get_suspended_duration(&suspend_time), + // SAFETY: + // Safe because _rdtsc takes no arguments, and we trust _rdtsc to not modify any + // other memory. 
unsafe { _rdtsc() } - suspend_time.tsc_value, ) } else { diff --git a/devices/src/virtio/snd/vios_backend/shm_streams.rs b/devices/src/virtio/snd/vios_backend/shm_streams.rs index 6339ec2768..7d8d475622 100644 --- a/devices/src/virtio/snd/vios_backend/shm_streams.rs +++ b/devices/src/virtio/snd/vios_backend/shm_streams.rs @@ -218,18 +218,16 @@ impl VioSndShmStream { ) -> GenericResult> { let interval = Duration::from_millis(buffer_size as u64 * 1000 / frame_rate as u64); - let dup_fd = unsafe { - // Safe because fcntl doesn't affect memory and client_shm should wrap a known valid - // file descriptor. - libc::fcntl(client_shm.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) - }; + // SAFETY: + // Safe because fcntl doesn't affect memory and client_shm should wrap a known valid + // file descriptor. + let dup_fd = unsafe { libc::fcntl(client_shm.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) }; if dup_fd < 0 { return Err(Box::new(Error::DupError(SysError::last()))); } - let file = unsafe { - // safe because we checked the result of libc::fcntl() - File::from_raw_fd(dup_fd) - }; + // SAFETY: + // safe because we checked the result of libc::fcntl() + let file = unsafe { File::from_raw_fd(dup_fd) }; let client_shm_clone = SharedMemory::from_file(file).map_err(Error::BaseMmapError)?; Ok(Box::new(Self { diff --git a/devices/src/virtio/snd/vios_backend/shm_vios.rs b/devices/src/virtio/snd/vios_backend/shm_vios.rs index 5efbaaf7e9..f69ce6d8f3 100644 --- a/devices/src/virtio/snd/vios_backend/shm_vios.rs +++ b/devices/src/virtio/snd/vios_backend/shm_vios.rs @@ -195,8 +195,9 @@ impl VioSClient { expected: usize, received: usize, ) -> Result { + // SAFETY: + // Safe because we transfer ownership from the SafeDescriptor to T unsafe { - // Safe because we transfer ownership from the SafeDescriptor to T Ok(T::from_raw_descriptor( safe_fds .pop() diff --git a/devices/src/virtio/vhost/user/device/fs/sys/linux.rs b/devices/src/virtio/vhost/user/device/fs/sys/linux.rs index 36fa03c046..dce2486c88 
100644 --- a/devices/src/virtio/vhost/user/device/fs/sys/linux.rs +++ b/devices/src/virtio/vhost/user/device/fs/sys/linux.rs @@ -19,11 +19,13 @@ use crate::virtio::vhost::user::device::listener::sys::VhostUserListener; use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait; fn default_uidmap() -> String { + // SAFETY: trivially safe let euid = unsafe { libc::geteuid() }; format!("{} {} 1", euid, euid) } fn default_gidmap() -> String { + // SAFETY: trivially safe let egid = unsafe { libc::getegid() }; format!("{} {} 1", egid, egid) } @@ -76,11 +78,13 @@ fn jail_and_fork( let tz = std::env::var("TZ").unwrap_or_default(); // fork on the jail here + // SAFETY: trivially safe let pid = unsafe { j.fork(Some(&keep_rds))? }; if pid > 0 { // Current FS driver jail does not use seccomp and jail_and_fork() does not have other // users, so we do nothing here for seccomp_trace + // SAFETY: trivially safe unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) }; } @@ -119,6 +123,7 @@ pub fn start_device(opts: Options) -> anyhow::Result<()> { // Parent, nothing to do but wait and then exit if pid != 0 { + // SAFETY: trivially safe unsafe { libc::waitpid(pid, std::ptr::null_mut(), 0) }; return Ok(()); } @@ -131,12 +136,14 @@ pub fn start_device(opts: Options) -> anyhow::Result<()> { // TODO(crbug.com/1199487): Remove this once libc provides the wrapper for all targets. #[cfg(target_os = "linux")] { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let mut securebits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }; if securebits < 0 { bail!(io::Error::last_os_error()); } securebits |= SECBIT_NO_SETUID_FIXUP; + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. 
let ret = unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }; if ret < 0 { diff --git a/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs b/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs index dab74e9d4b..dc43e0e578 100644 --- a/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs +++ b/devices/src/virtio/vhost/user/device/gpu/sys/linux.rs @@ -99,8 +99,11 @@ impl GpuBackend { // Start handling the display. let display = clone_descriptor(&*state.borrow_mut().display().borrow()) .map(|fd| { - // Safe because we just created this fd. - AsyncWrapper::new(unsafe { SafeDescriptor::from_raw_descriptor(fd) }) + AsyncWrapper::new( + // SAFETY: + // Safe because we just created this fd. + unsafe { SafeDescriptor::from_raw_descriptor(fd) }, + ) }) .context("failed to clone inner WaitContext for gpu display") .and_then(|ctx| { diff --git a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs index 6a3ecc3017..5084a9a676 100644 --- a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs +++ b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs @@ -120,6 +120,7 @@ impl GpuBackend { .clone(); // Start handling the display. + // SAFETY: // Safe because the raw descriptor is valid, and an event. let display = unsafe { EventAsync::clone_raw_without_reset(&*state.borrow_mut().display().borrow(), &self.ex) diff --git a/devices/src/virtio/vhost/user/device/handler.rs b/devices/src/virtio/vhost/user/device/handler.rs index f3e1901145..cd601d2bd9 100644 --- a/devices/src/virtio/vhost/user/device/handler.rs +++ b/devices/src/virtio/vhost/user/device/handler.rs @@ -912,8 +912,9 @@ impl SharedMemoryMapper for VhostShmemMapper { } => (descriptor, offset, size), VmMemorySource::SharedMemory(shmem) => { let size = shmem.size(); - // Safe because we own shmem. let descriptor = + // SAFETY: + // Safe because we own shmem. 
unsafe { SafeDescriptor::from_raw_descriptor(shmem.into_raw_descriptor()) }; (descriptor, 0, size) } diff --git a/devices/src/virtio/vhost/user/device/handler/sys/windows.rs b/devices/src/virtio/vhost/user/device/handler/sys/windows.rs index db7e80d04d..aa24317764 100644 --- a/devices/src/virtio/vhost/user/device/handler/sys/windows.rs +++ b/devices/src/virtio/vhost/user/device/handler/sys/windows.rs @@ -30,15 +30,18 @@ use crate::virtio::vhost::user::device::handler::DeviceRequestHandler; pub fn read_from_tube_transporter( raw_transport_tube: RawDescriptor, ) -> anyhow::Result { - // Safe because we know that raw_transport_tube is valid (passed by inheritance), and that - // the blocking & framing modes are accurate because we create them ourselves in the broker. - let tube_transporter = TubeTransporterReader::create_tube_transporter_reader(unsafe { - PipeConnection::from_raw_descriptor( - raw_transport_tube, - FramingMode::Message, - BlockingMode::Wait, - ) - }); + let tube_transporter = TubeTransporterReader::create_tube_transporter_reader( + // SAFETY: + // Safe because we know that raw_transport_tube is valid (passed by inheritance), and that + // the blocking & framing modes are accurate because we create them ourselves in the broker. + unsafe { + PipeConnection::from_raw_descriptor( + raw_transport_tube, + FramingMode::Message, + BlockingMode::Wait, + ) + }, + ); tube_transporter.read_tubes().map_err(anyhow::Error::msg) } diff --git a/devices/src/virtio/vhost/user/device/net/sys/linux.rs b/devices/src/virtio/vhost/user/device/net/sys/linux.rs index e5a74cddf3..34c15f16ce 100644 --- a/devices/src/virtio/vhost/user/device/net/sys/linux.rs +++ b/devices/src/virtio/vhost/user/device/net/sys/linux.rs @@ -98,6 +98,7 @@ where pub fn new_from_tap_fd(tap_fd: RawDescriptor) -> anyhow::Result { let tap_fd = validate_raw_descriptor(tap_fd).context("failed to validate tap fd")?; + // SAFETY: // Safe because we ensure that we get a unique handle to the fd. 
let tap = unsafe { T::from_raw_descriptor(tap_fd).context("failed to create tap device")? }; diff --git a/devices/src/virtio/vhost/user/device/wl.rs b/devices/src/virtio/vhost/user/device/wl.rs index ec157c8847..f1cdf3670d 100644 --- a/devices/src/virtio/vhost/user/device/wl.rs +++ b/devices/src/virtio/vhost/user/device/wl.rs @@ -269,6 +269,7 @@ impl VhostUserBackend for WlBackend { 0 => { let wlstate_ctx = clone_descriptor(wlstate.borrow().wait_ctx()) .map(|fd| { + // SAFETY: // Safe because we just created this fd. AsyncWrapper::new(unsafe { SafeDescriptor::from_raw_descriptor(fd) }) }) diff --git a/devices/src/virtio/video/decoder/backend/mod.rs b/devices/src/virtio/video/decoder/backend/mod.rs index f8ac2f7354..6e80f7626d 100644 --- a/devices/src/virtio/video/decoder/backend/mod.rs +++ b/devices/src/virtio/video/decoder/backend/mod.rs @@ -233,6 +233,7 @@ mod tests { #[allow(dead_code)] pub fn build_object_handle(mem: &SharedMemory) -> GuestResourceHandle { GuestResourceHandle::VirtioObject(VirtioObjectHandle { + // SAFETY: // Safe because we are taking ownership of a just-duplicated FD. desc: unsafe { SafeDescriptor::from_raw_descriptor(base::clone_descriptor(mem).unwrap()) @@ -246,6 +247,7 @@ mod tests { #[allow(dead_code)] pub fn build_guest_mem_handle(mem: &SharedMemory) -> GuestResourceHandle { GuestResourceHandle::GuestPages(GuestMemHandle { + // SAFETY: // Safe because we are taking ownership of a just-duplicated FD. 
desc: unsafe { SafeDescriptor::from_raw_descriptor(base::clone_descriptor(mem).unwrap()) diff --git a/devices/src/virtio/video/decoder/backend/vaapi.rs b/devices/src/virtio/video/decoder/backend/vaapi.rs index 6607ee4b13..452e0b8c6d 100644 --- a/devices/src/virtio/video/decoder/backend/vaapi.rs +++ b/devices/src/virtio/video/decoder/backend/vaapi.rs @@ -470,6 +470,7 @@ impl<'a, T: AsBufferHandle> BufferMapping<'a, T> { impl<'a, T: AsBufferHandle> AsRef<[u8]> for BufferMapping<'a, T> { fn as_ref(&self) -> &[u8] { let mapping = &self.mapping; + // SAFETY: // Safe because the mapping is linear and we own it, so it will not be unmapped during // the lifetime of this slice. unsafe { std::slice::from_raw_parts(mapping.as_ptr(), mapping.size()) } @@ -479,6 +480,7 @@ impl<'a, T: AsBufferHandle> AsRef<[u8]> for BufferMapping<'a, T> { impl<'a, T: AsBufferHandle> AsMut<[u8]> for BufferMapping<'a, T> { fn as_mut(&mut self) -> &mut [u8] { let mapping = &self.mapping; + // SAFETY: // Safe because the mapping is linear and we own it, so it will not be unmapped during // the lifetime of this slice. unsafe { std::slice::from_raw_parts_mut(mapping.as_ptr(), mapping.size()) } @@ -898,6 +900,7 @@ impl DecoderSession for VaapiDecoderSession { BufferDescriptor::GuestMem(GuestMemDescriptor(handle)) } GuestResourceHandle::VirtioObject(handle) => { + // SAFETY: descriptor is expected to be valid let fd = unsafe { OwnedFd::from_raw_fd(handle.desc.into_raw_descriptor()) }; let modifier = handle.modifier; diff --git a/devices/src/virtio/video/decoder/mod.rs b/devices/src/virtio/video/decoder/mod.rs index 40100146ed..d1a0e75978 100644 --- a/devices/src/virtio/video/decoder/mod.rs +++ b/devices/src/virtio/video/decoder/mod.rs @@ -570,6 +570,7 @@ impl Decoder { return Err(VideoError::InvalidArgument); } GuestResource::from_virtio_object_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. 
// unwrap() is also safe here because we just tested above that `entries` had // exactly one element. @@ -580,6 +581,7 @@ impl Decoder { .map_err(|_| VideoError::InvalidArgument)? } ResourceType::GuestPages => GuestResource::from_virtio_guest_mem_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. unsafe { std::slice::from_raw_parts( diff --git a/devices/src/virtio/video/encoder/backend/ffmpeg.rs b/devices/src/virtio/video/encoder/backend/ffmpeg.rs index 66af4e4783..cd4b63619c 100644 --- a/devices/src/virtio/video/encoder/backend/ffmpeg.rs +++ b/devices/src/virtio/video/encoder/backend/ffmpeg.rs @@ -205,6 +205,7 @@ impl FfmpegEncoderSession { "encoded packet does not fit in output buffer" ))); } + // SAFETY: // Safe because packet.as_ref().data and out_buf.as_ptr() are valid references and // we did bound check above. unsafe { diff --git a/devices/src/virtio/video/encoder/mod.rs b/devices/src/virtio/video/encoder/mod.rs index f4733fe0f1..f33828c312 100644 --- a/devices/src/virtio/video/encoder/mod.rs +++ b/devices/src/virtio/video/encoder/mod.rs @@ -669,6 +669,7 @@ impl EncoderDevice { return Err(VideoError::InvalidArgument); } GuestResource::from_virtio_object_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. // unwrap() is also safe here because we just tested above that `entries` had // exactly one element. @@ -679,6 +680,7 @@ impl EncoderDevice { .map_err(|_| VideoError::InvalidArgument)? } ResourceType::GuestPages => GuestResource::from_virtio_guest_mem_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. unsafe { std::slice::from_raw_parts( @@ -717,6 +719,7 @@ impl EncoderDevice { return Err(VideoError::InvalidArgument); } GuestResource::from_virtio_object_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. // unwrap() is also safe here because we just tested above that `entries` had // exactly one element. 
@@ -727,6 +730,7 @@ impl EncoderDevice { .map_err(|_| VideoError::InvalidArgument)? } ResourceType::GuestPages => GuestResource::from_virtio_guest_mem_entry( + // SAFETY: // Safe because we confirmed the correct type for the resource. unsafe { std::slice::from_raw_parts( diff --git a/devices/src/virtio/video/protocol.rs b/devices/src/virtio/video/protocol.rs index 241437ba3d..71986c3de1 100644 --- a/devices/src/virtio/video/protocol.rs +++ b/devices/src/virtio/video/protocol.rs @@ -205,6 +205,7 @@ pub struct virtio_video_stream_create { } impl Default for virtio_video_stream_create { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } diff --git a/devices/src/virtio/video/resource.rs b/devices/src/virtio/video/resource.rs index c5767b6ab2..00202ab778 100644 --- a/devices/src/virtio/video/resource.rs +++ b/devices/src/virtio/video/resource.rs @@ -52,12 +52,16 @@ pub union UnresolvedResourceEntry { impl fmt::Debug for UnresolvedResourceEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Safe because `self.object` and `self.guest_mem` are the same size and both made of - // integers, making it safe to display them no matter their value. write!( f, "unresolved {:?} or {:?}", + // SAFETY: + // Safe because `self.object` and `self.guest_mem` are the same size and both made of + // integers, making it safe to display them no matter their value. unsafe { self.object }, + // SAFETY: + // Safe because `self.object` and `self.guest_mem` are the same size and both made of + // integers, making it safe to display them no matter their value. unsafe { self.guest_mem } ) } @@ -230,6 +234,7 @@ impl GuestResource { .map_err(GuestMemResourceCreationError::CantGetShmRegion)?; let desc = base::clone_descriptor(guest_region) .map_err(GuestMemResourceCreationError::DescriptorCloneError)?; + // SAFETY: // Safe because we are the sole owner of the duplicated descriptor. 
unsafe { SafeDescriptor::from_raw_descriptor(desc) } } @@ -313,6 +318,7 @@ impl GuestResource { }; let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle { + // SAFETY: // Safe because `buffer_info.file` is a valid file descriptor and we are stealing // it. desc: unsafe { @@ -409,6 +415,7 @@ mod tests { // Create the `GuestMemHandle` we will try to map and retrieve the data from. let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle { + // SAFETY: descriptor is expected to be valid desc: unsafe { SafeDescriptor::from_raw_descriptor(base::clone_descriptor(&mem).unwrap()) }, @@ -425,6 +432,7 @@ mod tests { // that its u32s appear to increase linearly. let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap(); let mut data = vec![0u8; PAGE_SIZE * page_order.len()]; + // SAFETY: src and dst are valid and aligned unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) }; for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() { let sized_chunk: &[u8; 4] = chunk.try_into().unwrap(); diff --git a/devices/src/virtio/video/worker.rs b/devices/src/virtio/video/worker.rs index 66c6a2d505..078e164519 100644 --- a/devices/src/virtio/video/worker.rs +++ b/devices/src/virtio/video/worker.rs @@ -404,9 +404,11 @@ impl Worker { let device_evt = ex .async_from(AsyncWrapper::new( clone_descriptor(&device_wait_ctx) + .map(|fd| + // SAFETY: // Safe because we just created this fd. 
- .map(|fd| unsafe { SafeDescriptor::from_raw_descriptor(fd) }) - .map_err(Error::CloneDescriptorFailed)?, + unsafe { SafeDescriptor::from_raw_descriptor(fd) }) + .map_err(Error::CloneDescriptorFailed)?, )) .map_err(Error::EventAsyncCreationFailed)?; diff --git a/devices/src/virtio/vsock/sys/windows/vsock.rs b/devices/src/virtio/vsock/sys/windows/vsock.rs index 996da2b851..b022b699ba 100644 --- a/devices/src/virtio/vsock/sys/windows/vsock.rs +++ b/devices/src/virtio/vsock/sys/windows/vsock.rs @@ -650,6 +650,7 @@ impl Worker { // Start reading again so we receive the message and // event signal immediately. + // SAFETY: // Unsafe because the read could happen at any time // after this function is called. We ensure safety // by allocating the buffer and overlapped struct @@ -798,6 +799,7 @@ impl Worker { let mut buffer = Box::new([0u8; TEMP_READ_BUF_SIZE_BYTES]); info!("vsock: port {}: created client pipe", port); + // SAFETY: // Unsafe because the read could happen at any time // after this function is called. We ensure safety // by allocating the buffer and overlapped struct diff --git a/devices/src/virtio/wl.rs b/devices/src/virtio/wl.rs index a7315386ff..c6520e5617 100644 --- a/devices/src/virtio/wl.rs +++ b/devices/src/virtio/wl.rs @@ -215,6 +215,7 @@ ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info); fn is_fence(f: &File) -> bool { let info = sync_file_info::default(); + // SAFETY: // Safe as f is a valid file unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO(), &info) == 0 } } @@ -444,6 +445,7 @@ struct VmRequester { // The following are wrappers to avoid base dependencies in the rutabaga crate #[cfg(feature = "minigbm")] fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor { + // SAFETY: // Safe because we own the SafeDescriptor at this point. 
unsafe { SafeDescriptor::from_raw_descriptor(r.into_raw_descriptor()) } } @@ -709,11 +711,13 @@ impl CtrlVfdSendVfdV2 { self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU ); + // SAFETY: trivially safe given we assert kind unsafe { self.payload.id } } #[cfg(feature = "gpu")] fn seqno(&self) -> Le64 { assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE); + // SAFETY: trivially safe given we assert kind unsafe { self.payload.seqno } } } @@ -870,6 +874,7 @@ impl WlVfd { let sync = dma_buf_sync { flags: flags as u64, }; + // SAFETY: // Safe as descriptor is a valid dmabuf and incorrect flags will return an error. if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 { Err(WlError::DmabufSync(io::Error::last_os_error())) @@ -1430,6 +1435,7 @@ impl WlState { match self.signaled_fence.as_ref().unwrap().try_clone() { Ok(dup) => { *descriptor = dup.into_raw_descriptor(); + // SAFETY: // Safe because the fd comes from a valid SafeDescriptor. 
let file = unsafe { File::from_raw_descriptor(*descriptor) }; bridged_files.push(file); diff --git a/devices/src/vmwdt.rs b/devices/src/vmwdt.rs index a0489d8657..cf1a3746a1 100644 --- a/devices/src/vmwdt.rs +++ b/devices/src/vmwdt.rs @@ -220,6 +220,7 @@ impl Vmwdt { .and_then(|guest_time| guest_time.parse::().ok()) .unwrap_or(0); + // SAFETY: // Safe because this just returns an integer let ticks_per_sec = unsafe { libc::sysconf(libc::_SC_CLK_TCK) } as u64; Ok((gtime_ticks * 1000 / ticks_per_sec) as i64) diff --git a/e2e_tests/fixture/src/sys/linux.rs b/e2e_tests/fixture/src/sys/linux.rs index d5c506d70c..661293b8f5 100644 --- a/e2e_tests/fixture/src/sys/linux.rs +++ b/e2e_tests/fixture/src/sys/linux.rs @@ -51,6 +51,7 @@ pub fn binary_name() -> &'static str { /// Safe wrapper for libc::mkfifo pub(crate) fn mkfifo(path: &Path) -> io::Result<()> { let cpath = CString::new(path.to_str().unwrap()).unwrap(); + // SAFETY: no mutable pointer passed to function and the return value is checked. let result = unsafe { libc::mkfifo(cpath.as_ptr(), 0o777) }; if result == 0 { Ok(()) diff --git a/e2e_tests/tests/pci_hotplug.rs b/e2e_tests/tests/pci_hotplug.rs index e0cacd5ee0..9c01cfae81 100644 --- a/e2e_tests/tests/pci_hotplug.rs +++ b/e2e_tests/tests/pci_hotplug.rs @@ -47,6 +47,7 @@ where /// setup a tap device for test fn setup_tap_device(tap_name: &[u8], ip_addr: Ipv4Addr, netmask: Ipv4Addr, mac_addr: MacAddress) { let tap = Tap::new_with_name(tap_name, true, false).unwrap(); + // SAFETY: // ioctl is safe since we call it with a valid tap fd and check the return value. 
let ret = unsafe { ioctl_with_val(&tap, net_sys::TUNSETPERSIST(), 1) }; if ret < 0 { diff --git a/fuse/src/mount.rs b/fuse/src/mount.rs index 057e2c0305..dc6dbf09b3 100644 --- a/fuse/src/mount.rs +++ b/fuse/src/mount.rs @@ -73,6 +73,7 @@ pub fn mount>( let mountpoint = CString::new(mountpoint.as_ref().as_bytes())?; let mount_options = CString::new(join_mount_options(options))?; + // SAFETY: // Safe because pointer arguments all points to null-terminiated CStrings. let retval = unsafe { libc::mount( diff --git a/gpu_display/src/gpu_display_win/keyboard_input_manager.rs b/gpu_display/src/gpu_display_win/keyboard_input_manager.rs index 665039881b..2a28df16aa 100644 --- a/gpu_display/src/gpu_display_win/keyboard_input_manager.rs +++ b/gpu_display/src/gpu_display_win/keyboard_input_manager.rs @@ -120,9 +120,14 @@ impl KeyboardInputManager { fn release_any_down_keys(&self) { let mut events = Vec::with_capacity(256); let mut keyboard_state: [u8; 256] = [0; 256]; + // SAFETY: // Safe because `keyboard_state` is guaranteed to exist, and is of the expected size. if unsafe { GetKeyboardState(keyboard_state.as_mut_ptr()) } == 0 { - error!("Failed in GetKeyboardState: {}", unsafe { GetLastError() }); + error!( + "Failed in GetKeyboardState: {}", + // SAFETY: trivially safe + unsafe { GetLastError() } + ); return; } @@ -142,6 +147,7 @@ impl KeyboardInputManager { continue; } + // SAFETY: // Trivially safe (no pointers or errors to check). let scancode = unsafe { MapVirtualKeyW(virtual_keycode as u32, MAPVK_VK_TO_VSC) }; if let Some(linux_keycode) = self.keycode_translator.translate(scancode) { @@ -180,6 +186,7 @@ struct KeyStates { /// On success, returns a tuple containing current state of caps lock and num lock keys. fn get_host_key_states() -> Option { let mut keyboard_state: [BYTE; 256] = [0; 256]; + // SAFETY: // Safe because `keyboard_state` is guaranteed to exist, and is of the expected size. 
if unsafe { GetKeyboardState(keyboard_state.as_mut_ptr()) } != 0 { Some(KeyStates { @@ -187,7 +194,11 @@ fn get_host_key_states() -> Option { num_lock_state: toggle_to_bool(keyboard_state[VK_NUMLOCK as usize]), }) } else { - warn!("Failed in GetKeyboardState: {}", unsafe { GetLastError() }); + warn!( + "Failed in GetKeyboardState: {}", + // SAFETY: trivially safe + unsafe { GetLastError() } + ); None } } diff --git a/gpu_display/src/gpu_display_win/window.rs b/gpu_display/src/gpu_display_win/window.rs index 6d895fc103..8846c8ad82 100644 --- a/gpu_display/src/gpu_display_win/window.rs +++ b/gpu_display/src/gpu_display_win/window.rs @@ -208,6 +208,7 @@ pub(crate) trait BasicWindow { unsafe fn handle(&self) -> HWND; fn is_same_window(&self, hwnd: HWND) -> bool { + // SAFETY: // Safe because we are just comparing handle values. hwnd == unsafe { self.handle() } } @@ -227,6 +228,7 @@ pub(crate) trait BasicWindow { /// Calls `RemovePropW()` internally. fn remove_property(&self, property: &str) -> Result<()> { + // SAFETY: // Safe because the window object won't outlive the HWND, and failures are handled below. unsafe { SetLastError(0); @@ -240,6 +242,7 @@ pub(crate) trait BasicWindow { /// Calls `DestroyWindow()` internally. fn destroy(&self) -> Result<()> { + // SAFETY: // Safe because the window object won't outlive the HWND. if unsafe { DestroyWindow(self.handle()) } == 0 { syscall_bail!("Failed to call DestroyWindow()"); @@ -301,6 +304,7 @@ impl GuiWindow { /// Calls `IsWindow()` internally. Returns true if the HWND identifies an existing window. pub fn is_valid(&self) -> bool { + // SAFETY: // Safe because it is called from the same thread the created the window. unsafe { IsWindow(self.hwnd) != 0 } } @@ -327,6 +331,7 @@ impl GuiWindow { /// Calls `GetWindowLongPtrW()` internally. pub fn get_attribute(&self, index: i32) -> Result { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, and failures are handled below. 
unsafe { // GetWindowLongPtrW() may return zero if we haven't set that attribute before, so we @@ -342,6 +347,7 @@ impl GuiWindow { /// Calls `SetWindowLongPtrW()` internally. pub fn set_attribute(&self, index: i32, value: isize) -> Result<()> { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, and failures are handled below. unsafe { // SetWindowLongPtrW() may return zero if the previous value of that attribute was zero, @@ -358,6 +364,7 @@ impl GuiWindow { /// Calls `GetWindowRect()` internally. pub fn get_window_rect(&self) -> Result { let mut rect: RECT = Default::default(); + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `rect` is valid, and // failures are handled below. unsafe { @@ -376,6 +383,7 @@ impl GuiWindow { /// Calls `GetClientRect()` internally. pub fn get_client_rect(&self) -> Result { let mut rect: RECT = Default::default(); + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `rect` is valid, and // failures are handled below. unsafe { @@ -404,6 +412,7 @@ impl GuiWindow { /// specified point to screen coordinates. pub fn client_to_screen(&self, point: &Point) -> Result { let mut point = point.to_sys_point(); + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `point` is valid, and // failures are handled below. unsafe { @@ -419,6 +428,7 @@ impl GuiWindow { pub fn screen_to_client(&self, point: Point) -> Result { let mut point = point.to_sys_point(); + // SAFETY: // Safe because: // 1. point is stack allocated & lives as long as the function call. // 2. the window handle is guaranteed valid by self. @@ -435,6 +445,7 @@ impl GuiWindow { /// Calls `MonitorFromWindow()` internally. If the window is not on any active display monitor, /// returns the handle to the closest one. pub fn get_nearest_monitor_handle(&self) -> HMONITOR { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. 
unsafe { MonitorFromWindow(self.hwnd, MONITOR_DEFAULTTONEAREST) } } @@ -442,18 +453,21 @@ impl GuiWindow { /// Calls `MonitorFromWindow()` internally. If the window is not on any active display monitor, /// returns the info of the closest one. pub fn get_monitor_info(&self) -> Result { + // SAFETY: // Safe because `get_nearest_monitor_handle()` always returns a valid monitor handle. unsafe { MonitorInfo::new(self.get_nearest_monitor_handle()) } } /// Calls `MonitorFromWindow()` internally. pub fn is_on_active_display(&self) -> bool { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { !MonitorFromWindow(self.hwnd, MONITOR_DEFAULTTONULL).is_null() } } /// Calls `SetWindowPos()` internally. pub fn set_pos(&self, window_rect: &Rect, flags: u32) -> Result<()> { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, and failures are handled below. unsafe { if SetWindowPos( @@ -486,6 +500,7 @@ impl GuiWindow { /// Calls `ShowWindow()` internally. Note that it is more preferable to call `set_pos()` with /// `SWP_SHOWWINDOW` since that would set the error code on failure. pub fn show(&self) { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { ShowWindow(self.hwnd, SW_SHOW); @@ -494,6 +509,7 @@ impl GuiWindow { /// Calls `ShowWindow()` internally to restore a minimized window. pub fn restore(&self) { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { ShowWindow(self.hwnd, SW_RESTORE); @@ -504,6 +520,7 @@ impl GuiWindow { /// is restored. For example, if we have switched from maximized to fullscreen, this function /// would still return true. pub fn was_maximized(&self) -> bool { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { IsZoomed(self.hwnd) != 0 } } @@ -511,6 +528,7 @@ impl GuiWindow { /// Calls `IsWindowVisible()` internally. We also require that the window size is nonzero to be /// considered visible. 
pub fn is_visible(&self) -> Result { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. if unsafe { IsWindowVisible(self.hwnd) } != 0 { let window_rect = self @@ -529,6 +547,7 @@ impl GuiWindow { /// user is currently working. It might belong to a different thread/process than the calling /// thread. pub fn is_global_foreground_window(&self) -> bool { + // SAFETY: // Safe because there is no argument. unsafe { GetForegroundWindow() == self.hwnd } } @@ -537,12 +556,14 @@ impl GuiWindow { /// currently working and is attached to the calling thread's message queue. It is possible that /// there is no active window if the foreground focus is on another thread/process. pub fn is_thread_foreground_window(&self) -> bool { + // SAFETY: // Safe because there is no argument. unsafe { GetActiveWindow() == self.hwnd } } /// Calls `IsIconic()` internally. pub fn is_minimized(&self) -> bool { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { IsIconic(self.hwnd) != 0 } } @@ -550,6 +571,7 @@ impl GuiWindow { /// Calls `SetForegroundWindow()` internally. `SetForegroundWindow()` may fail, for example, /// when the taskbar is in the foreground, hence this is a best-effort call. pub fn bring_to_foreground(&self) { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. if unsafe { SetForegroundWindow(self.hwnd) } == 0 { info!("Cannot bring the window to foreground."); @@ -566,6 +588,7 @@ impl GuiWindow { hRgnBlur: null_mut(), fTransitionOnMaximized: FALSE, }; + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `blur_behind` is valid, // and failures are handled below. 
let errno = unsafe { DwmEnableBlurBehindWindow(self.hwnd, &blur_behind) }; @@ -588,6 +611,7 @@ impl GuiWindow { dw_ex_style: u32, ) -> Result { let mut window_rect: RECT = client_rect.to_sys_rect(); + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `window_rect` is valid, // and failures are handled below. unsafe { @@ -611,6 +635,7 @@ impl GuiWindow { length: mem::size_of::().try_into().unwrap(), ..Default::default() }; + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND, we know `window_placement` is // valid, and failures are handled below. unsafe { @@ -627,6 +652,7 @@ impl GuiWindow { /// Calls `PostMessageW()` internally. pub fn post_message(&self, msg: UINT, w_param: WPARAM, l_param: LPARAM) -> Result<()> { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { if PostMessageW(self.hwnd, msg, w_param, l_param) == 0 { @@ -638,12 +664,14 @@ impl GuiWindow { /// Calls `DefWindowProcW()` internally. pub fn default_process_message(&self, packet: &MessagePacket) -> LRESULT { + // SAFETY: // Safe because `GuiWindow` object won't outlive the HWND. unsafe { DefWindowProcW(self.hwnd, packet.msg, packet.w_param, packet.l_param) } } /// Calls `LoadIconW()` internally. pub(crate) fn load_custom_icon(hinstance: HINSTANCE, resource_id: WORD) -> Result { + // SAFETY: // Safe because we handle failures below. unsafe { let hicon = LoadIconW(hinstance, MAKEINTRESOURCEW(resource_id)); @@ -656,6 +684,7 @@ impl GuiWindow { /// Calls `LoadCursorW()` internally. pub(crate) fn load_system_cursor(cursor_id: LPCWSTR) -> Result { + // SAFETY: // Safe because we handle failures below. unsafe { let hcursor = LoadCursorW(null_mut(), cursor_id); @@ -668,6 +697,7 @@ impl GuiWindow { /// Calls `GetStockObject()` internally. pub(crate) fn create_opaque_black_brush() -> Result { + // SAFETY: // Safe because we handle failures below. 
unsafe { let hobject = GetStockObject(BLACK_BRUSH as i32); @@ -681,6 +711,7 @@ impl GuiWindow { impl Drop for GuiWindow { fn drop(&mut self) { + // SAFETY: // Safe because it is called from the same thread the created the window. if unsafe { IsWindow(self.hwnd) } == 0 { error!("The underlying HWND is invalid when Window is being dropped!") @@ -744,6 +775,7 @@ fn create_sys_window( hwnd_parent: HWND, initial_window_size: &Size2D, ) -> Result { + // SAFETY: // Safe because we handle failures below. let hwnd = unsafe { CreateWindowExW( @@ -770,12 +802,14 @@ fn create_sys_window( /// Calls `GetModuleHandleW()` internally. pub(crate) fn get_current_module_handle() -> HMODULE { + // SAFETY: // Safe because we handle failures below. let hmodule = unsafe { GetModuleHandleW(null_mut()) }; if hmodule.is_null() { // If it fails, we are in a very broken state and it doesn't make sense to keep running. panic!( "Failed to call GetModuleHandleW() for the current module (Error code {})", + // SAFETY: trivially safe unsafe { GetLastError() } ); } @@ -811,6 +845,7 @@ impl MonitorInfo { // https://support.microsoft.com/en-us/topic/kb4570006-update-to-disable-and-remove-the-remotefx-vgpu-component-in-windows-bbdf1531-7188-2bf4-0de6-641de79f09d2 // So, we are only calling `GetSystemMetrics(SM_REMOTESESSION)` here until this changes in // the future. + // SAFETY: // Safe because no memory management is needed for arguments. let is_rdp_session = unsafe { GetSystemMetrics(SM_REMOTESESSION) != 0 }; Ok(Self { @@ -849,6 +884,7 @@ impl MonitorInfo { fn get_monitor_dpi(hmonitor: HMONITOR) -> i32 { let mut dpi_x = 0; let mut dpi_y = 0; + // SAFETY: // This is always safe since `GetDpiForMonitor` won't crash if HMONITOR is invalid, but // return E_INVALIDARG. 
unsafe { diff --git a/gpu_display/src/gpu_display_win/window_message_dispatcher.rs b/gpu_display/src/gpu_display_win/window_message_dispatcher.rs index 971154408e..776764a11a 100644 --- a/gpu_display/src/gpu_display_win/window_message_dispatcher.rs +++ b/gpu_display/src/gpu_display_win/window_message_dispatcher.rs @@ -210,8 +210,10 @@ impl WindowMessageDispatcher { fn attach_thread_message_router(self: Pin<&mut Self>) -> Result<()> { let dispatcher_ptr = &*self as *const Self; + // SAFETY: // Safe because we won't move the dispatcher out of it. match unsafe { &self.get_unchecked_mut().message_router_window } { + // SAFETY: // Safe because we guarantee the dispatcher outlives the thread message router. Some(router) => unsafe { Self::store_pointer_in_window(dispatcher_ptr, router) }, None => bail!("Thread message router not found, cannot associate with dispatcher!"), @@ -222,8 +224,10 @@ impl WindowMessageDispatcher { if !window.is_valid() { bail!("Window handle is invalid!"); } + // SAFETY: // Safe because we guarantee the dispatcher outlives our GUI windows. unsafe { Self::store_pointer_in_window(&*self, &window)? }; + // SAFETY: // Safe because we won't move the dispatcher out of it, and the dispatcher is aware of the // lifecycle of the window. unsafe { @@ -251,6 +255,7 @@ impl WindowMessageDispatcher { WM_USER_HANDLE_DISPLAY_MESSAGE_INTERNAL => { let _trace_event = trace_event!(gpu_display, "WM_USER_HANDLE_DISPLAY_MESSAGE_INTERNAL"); + // SAFETY: // Safe because the sender gives up the ownership and expects the receiver to // destruct the message. let message = unsafe { Box::from_raw(l_param as *mut DisplaySendToWndProc) }; @@ -266,6 +271,7 @@ impl WindowMessageDispatcher { _ => { let _trace_event = trace_event!(gpu_display, "WM_OTHER_MESSAGE_ROUTER_WINDOW_MESSAGE"); + // SAFETY: // Safe because we are processing a message targeting the message router window. 
return unsafe { DefWindowProcW(message_router_hwnd, msg, w_param, l_param) }; } @@ -373,6 +379,7 @@ impl WindowMessageDispatcher { fn request_exit_message_loop() { info!("Posting WM_QUIT"); + // SAFETY: // Safe because it will always succeed. unsafe { PostQuitMessage(0); diff --git a/gpu_display/src/gpu_display_win/window_message_processor.rs b/gpu_display/src/gpu_display_win/window_message_processor.rs index c623fb9d18..7e7bb30ca8 100644 --- a/gpu_display/src/gpu_display_win/window_message_processor.rs +++ b/gpu_display/src/gpu_display_win/window_message_processor.rs @@ -306,6 +306,7 @@ impl From<&MessagePacket> for WindowMessage { Self::WindowPos(WindowPosMessage::WindowPosChanging { l_param }) } WM_WINDOWPOSCHANGED => { + // SAFETY: // Safe because it will live at least until we finish handling // `WM_WINDOWPOSCHANGED`. let window_pos: WINDOWPOS = unsafe { *(l_param as *mut WINDOWPOS) }; diff --git a/gpu_display/src/gpu_display_win/window_procedure_thread.rs b/gpu_display/src/gpu_display_win/window_procedure_thread.rs index 59f8609c75..c4414ff312 100644 --- a/gpu_display/src/gpu_display_win/window_procedure_thread.rs +++ b/gpu_display/src/gpu_display_win/window_procedure_thread.rs @@ -184,6 +184,7 @@ impl RegisterWindowClass for GuiWindow { hIconSm: hicon, }; + // SAFETY: // Safe because we know the lifetime of `window_class`, and we handle failures below. if unsafe { RegisterClassExW(&window_class) } == 0 { syscall_bail!("Failed to call RegisterClassExW()"); @@ -213,6 +214,7 @@ impl RegisterWindowClass for MessageOnlyWindow { hIconSm: null_mut(), }; + // SAFETY: // Safe because we know the lifetime of `window_class`, and we handle failures below. 
if unsafe { RegisterClassExW(&window_class) } == 0 { syscall_bail!("Failed to call RegisterClassExW()"); @@ -301,6 +303,7 @@ impl WindowProcedureThread { if !self.is_message_loop_running() { bail!("Cannot post message to WndProc thread because message loop is not running!"); } + // SAFETY: // Safe because the message loop is still running. if unsafe { PostMessageW(self.message_router_handle, msg, w_param, l_param) } == 0 { syscall_bail!("Failed to call PostMessageW()"); @@ -329,6 +332,7 @@ impl WindowProcedureThread { gpu_main_display_tube: Option, ) { let gpu_main_display_tube = gpu_main_display_tube.map(Rc::new); + // SAFETY: // Safe because the dispatcher will take care of the lifetime of the `MessageOnlyWindow` and // `GuiWindow` objects. match unsafe { Self::create_windows() }.and_then(|(message_router_window, gui_window)| { @@ -342,8 +346,9 @@ impl WindowProcedureThread { info!("WndProc thread entering message loop"); message_loop_state.store(MessageLoopState::Running as i32, Ordering::SeqCst); - // Safe because we won't use the handle unless the message loop is still running. let message_router_handle = + // SAFETY: + // Safe because we won't use the handle unless the message loop is still running. unsafe { dispatcher.message_router_handle().unwrap_or(null_mut()) }; // HWND cannot be sent cross threads, so we cast it to u32 first. if let Err(e) = message_router_handle_sender.send(Ok(message_router_handle as u32)) @@ -384,6 +389,7 @@ impl WindowProcedureThread { } loop { + // SAFETY: // Safe because the lifetime of handles are at least as long as the function call. match unsafe { msg_wait_ctx.wait() } { Ok(token) => match token { @@ -429,6 +435,7 @@ impl WindowProcedureThread { // Safe because if `message` is initialized, we will call `assume_init()` to extract the // value, which will get dropped eventually. let mut message = mem::MaybeUninit::uninit(); + // SAFETY: // Safe because `message` lives at least as long as the function call. 
if unsafe { PeekMessageW( @@ -443,6 +450,7 @@ impl WindowProcedureThread { return true; } + // SAFETY: // Safe because `PeekMessageW()` has populated `message`. unsafe { let new_message = message.assume_init(); @@ -621,6 +629,7 @@ impl Drop for WindowProcedureThread { } } +// SAFETY: // Since `WindowProcedureThread` does not hold anything that cannot be transferred between threads, // we can implement `Send` for it. unsafe impl Send for WindowProcedureThread {} diff --git a/gpu_display/src/gpu_display_wl.rs b/gpu_display/src/gpu_display_wl.rs index 9d9f3e67a4..cdc0974688 100644 --- a/gpu_display/src/gpu_display_wl.rs +++ b/gpu_display/src/gpu_display_wl.rs @@ -48,6 +48,7 @@ struct DwlContext(*mut dwl_context); impl Drop for DwlContext { fn drop(&mut self) { if !self.0.is_null() { + // SAFETY: // Safe given that we checked the pointer for non-null and it should always be of the // correct type. unsafe { @@ -59,6 +60,7 @@ impl Drop for DwlContext { impl AsRawDescriptor for DwlContext { fn as_raw_descriptor(&self) -> RawDescriptor { + // SAFETY: // Safe given that the context pointer is valid. unsafe { dwl_context_fd(self.0) } } @@ -71,6 +73,7 @@ impl GpuDisplayImport for DwlDmabuf {} impl Drop for DwlDmabuf { fn drop(&mut self) { if !self.0.is_null() { + // SAFETY: // Safe given that we checked the pointer for non-null and it should always be of the // correct type. unsafe { @@ -84,6 +87,7 @@ struct DwlSurface(*mut dwl_surface); impl Drop for DwlSurface { fn drop(&mut self) { if !self.0.is_null() { + // SAFETY: // Safe given that we checked the pointer for non-null and it should always be of the // correct type. unsafe { @@ -109,6 +113,7 @@ impl WaylandSurface { impl GpuDisplaySurface for WaylandSurface { fn surface_descriptor(&self) -> u64 { + // SAFETY: // Safe if the surface is valid. 
let pointer = unsafe { dwl_surface_descriptor(self.surface.0) }; pointer as u64 @@ -130,11 +135,13 @@ impl GpuDisplaySurface for WaylandSurface { fn next_buffer_in_use(&self) -> bool { let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT; + // SAFETY: // Safe because only a valid surface and buffer index is used. unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) } } fn close_requested(&self) -> bool { + // SAFETY: // Safe because only a valid surface is used. unsafe { dwl_surface_close_requested(self.surface()) } } @@ -143,6 +150,7 @@ impl GpuDisplaySurface for WaylandSurface { self.buffer_index .set((self.buffer_index.get() + 1) % BUFFER_COUNT); + // SAFETY: // Safe because only a valid surface and buffer index is used. unsafe { dwl_surface_flip(self.surface(), self.buffer_index.get()); @@ -150,11 +158,13 @@ impl GpuDisplaySurface for WaylandSurface { } fn flip_to(&mut self, import_id: u32) { + // SAFETY: // Safe because only a valid surface and import_id is used. unsafe { dwl_surface_flip_to(self.surface(), import_id) } } fn commit(&mut self) -> GpuDisplayResult<()> { + // SAFETY: // Safe because only a valid surface is used. unsafe { dwl_surface_commit(self.surface()); @@ -164,6 +174,7 @@ impl GpuDisplaySurface for WaylandSurface { } fn set_position(&mut self, x: u32, y: u32) { + // SAFETY: // Safe because only a valid surface is used. unsafe { dwl_surface_set_position(self.surface(), x, y); @@ -171,6 +182,7 @@ impl GpuDisplaySurface for WaylandSurface { } fn set_scanout_id(&mut self, scanout_id: u32) { + // SAFETY: // Safe because only a valid surface is used. unsafe { dwl_surface_set_scanout_id(self.surface(), scanout_id); @@ -191,10 +203,13 @@ pub struct DisplayWl { /// Error logging callback used by wrapped C implementation. /// +/// # Safety +/// /// safe because it must be passed a valid pointer to null-terminated c-string. 
unsafe extern "C" fn error_callback(message: *const ::std::os::raw::c_char) { catch_unwind(|| { assert!(!message.is_null()); + // SAFETY: trivially safe let msg = unsafe { std::str::from_utf8(std::slice::from_raw_parts( message as *const u8, @@ -210,6 +225,7 @@ unsafe extern "C" fn error_callback(message: *const ::std::os::raw::c_char) { impl DisplayWl { /// Opens a fresh connection to the compositor. pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult { + // SAFETY: // The dwl_context_new call should always be safe to call, and we check its result. let ctx = DwlContext(unsafe { dwl_context_new(Some(error_callback)) }); if ctx.0.is_null() { @@ -233,6 +249,7 @@ impl DisplayWl { .as_ref() .map(|s: &CString| CStr::as_ptr(s)) .unwrap_or(null()); + // SAFETY: args are valid and the return value is checked. let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) }; if !setup_success { return Err(GpuDisplayError::Connect); @@ -250,6 +267,7 @@ impl DisplayWl { } fn pop_event(&self) -> dwl_event { + // SAFETY: // Safe because dwl_next_events from a context's circular buffer. unsafe { let mut ev = zeroed(); @@ -271,6 +289,7 @@ impl DisplayWl { impl DisplayT for DisplayWl { fn pending_events(&self) -> bool { + // SAFETY: // Safe because the function just queries the values of two variables in a context. unsafe { dwl_context_pending_events(self.ctx()) } } @@ -340,6 +359,7 @@ impl DisplayT for DisplayWl { } fn flush(&self) { + // SAFETY: // Safe given that the context pointer is valid. unsafe { dwl_context_dispatch(self.ctx()); @@ -369,6 +389,7 @@ impl DisplayT for DisplayWl { SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA, SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT, }; + // SAFETY: // Safe because only a valid context, parent ID (if not non-zero), and buffer FD are used. // The returned surface is checked for validity before being filed away. 
let surface = DwlSurface(unsafe { @@ -410,6 +431,7 @@ impl DisplayT for DisplayWl { height: u32, fourcc: u32, ) -> GpuDisplayResult> { + // SAFETY: // Safe given that the context pointer is valid. Any other invalid parameters would be // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid // before filing it away. diff --git a/gpu_display/src/gpu_display_x.rs b/gpu_display/src/gpu_display_x.rs index 229ae0489f..b2250941a0 100644 --- a/gpu_display/src/gpu_display_x.rs +++ b/gpu_display/src/gpu_display_x.rs @@ -50,6 +50,8 @@ use crate::SysDisplayT; const BUFFER_COUNT: usize = 2; /// A wrapper for XFree that takes any type. +/// SAFETY: It is caller's responsibility to ensure that `t` is valid for the entire duration of the +/// call. unsafe fn x_free(t: *mut T) { xlib::XFree(t as *mut c_void); } @@ -59,6 +61,8 @@ struct XDisplay(Rc>); impl Drop for XDisplay { fn drop(&mut self) { if Rc::strong_count(&self.0) == 1 { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { xlib::XCloseDisplay(self.as_ptr()); } @@ -74,6 +78,8 @@ impl XDisplay { /// Sends any pending commands to the X server. fn flush(&self) { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { xlib::XFlush(self.as_ptr()); } @@ -81,20 +87,30 @@ impl XDisplay { /// Returns true of the XShm extension is supported on this display. fn supports_shm(&self) -> bool { - unsafe { xlib::XShmQueryExtension(self.as_ptr()) != 0 } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + xlib::XShmQueryExtension(self.as_ptr()) != 0 + } } /// Gets the default screen of this display. 
fn default_screen(&self) -> Option { - Some(XScreen(NonNull::new(unsafe { - xlib::XDefaultScreenOfDisplay(self.as_ptr()) - })?)) + Some(XScreen(NonNull::new( + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + xlib::XDefaultScreenOfDisplay(self.as_ptr()) + }, + )?)) } /// Blocks until the next event from the display is received and returns that event. /// /// Always flush before using this if any X commands where issued. fn next_event(&self) -> XEvent { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { let mut ev = zeroed(); xlib::XNextEvent(self.as_ptr(), &mut ev); @@ -105,7 +121,11 @@ impl XDisplay { impl AsRawDescriptor for XDisplay { fn as_raw_descriptor(&self) -> RawDescriptor { - unsafe { xlib::XConnectionNumber(self.as_ptr()) } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + xlib::XConnectionNumber(self.as_ptr()) + } } } @@ -119,12 +139,20 @@ impl From for XEvent { impl XEvent { fn any(&self) -> xlib::XAnyEvent { // All events have the same xany field. - unsafe { self.0.xany } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.0.xany + } } fn type_(&self) -> u32 { // All events have the same type_ field. - unsafe { self.0.type_ as u32 } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.0.type_ as u32 + } } fn window(&self) -> xlib::Window { @@ -134,24 +162,52 @@ impl XEvent { // Some of the event types are dynamic so they need to be passed in. 
fn as_enum(&self, shm_complete_type: u32) -> XEventEnum { match self.type_() { - xlib::KeyPress | xlib::KeyRelease => XEventEnum::KeyEvent(unsafe { self.0.xkey }), - xlib::ButtonPress => XEventEnum::ButtonEvent { - event: unsafe { self.0.xbutton }, - pressed: true, - }, - xlib::ButtonRelease => XEventEnum::ButtonEvent { - event: unsafe { self.0.xbutton }, - pressed: false, - }, - xlib::MotionNotify => XEventEnum::Motion(unsafe { self.0.xmotion }), + xlib::KeyPress | xlib::KeyRelease => XEventEnum::KeyEvent( + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.0.xkey + }, + ), + xlib::ButtonPress => { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + XEventEnum::ButtonEvent { + event: unsafe { self.0.xbutton }, + pressed: true, + } + } + xlib::ButtonRelease => { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + XEventEnum::ButtonEvent { + event: unsafe { self.0.xbutton }, + pressed: false, + } + } + xlib::MotionNotify => XEventEnum::Motion( + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.0.xmotion + }, + ), xlib::Expose => XEventEnum::Expose, xlib::ClientMessage => { - XEventEnum::ClientMessage(unsafe { self.0.xclient.data.l[0] as u64 }) + XEventEnum::ClientMessage( + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.0.xclient.data.l[0] as u64 + }, + ) } t if t == shm_complete_type => { // Because XShmCompletionEvent is not part of the XEvent union, simulate a union // with transmute_copy. If the shm_complete_type turns out to be bogus, some of the // data would be incorrect, but the common event fields would still be valid. 
+ // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ev_completion: xlib::XShmCompletionEvent = unsafe { transmute_copy(&self.0) }; XEventEnum::ShmCompletionEvent(ev_completion.shmseg) } @@ -183,7 +239,11 @@ impl XScreen { /// Gets the screen number of this screen. fn get_number(&self) -> i32 { - unsafe { xlib::XScreenNumberOfScreen(self.as_ptr()) } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + xlib::XScreenNumberOfScreen(self.as_ptr()) + } } } @@ -199,6 +259,8 @@ struct Buffer { impl Drop for Buffer { fn drop(&mut self) { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { xlib::XShmDetach(self.display.as_ptr(), self.segment_info.as_mut()); xlib::XDestroyImage(self.image); @@ -210,14 +272,24 @@ impl Drop for Buffer { impl Buffer { fn as_volatile_slice(&self) -> VolatileSlice { - unsafe { VolatileSlice::from_raw_parts(self.segment_info.shmaddr as *mut _, self.size) } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + VolatileSlice::from_raw_parts(self.segment_info.shmaddr as *mut _, self.size) + } } fn stride(&self) -> usize { - unsafe { (*self.image).bytes_per_line as usize } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + (*self.image).bytes_per_line as usize + } } fn bytes_per_pixel(&self) -> usize { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let bytes_per_pixel = unsafe { (*self.image).bits_per_pixel / 8 }; bytes_per_pixel as usize } @@ -259,6 +331,8 @@ impl XSurface { _ => { // If there is no buffer, that means the framebuffer was never set and we should // simply blank the window with arbitrary contents. 
+ // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { xlib::XClearWindow(self.display.as_ptr(), self.window); } @@ -268,6 +342,8 @@ impl XSurface { // Mark the buffer as in use. When the XShmCompletionEvent occurs, this will get marked // false. buffer.in_use = true; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { xlib::XShmPutImage( self.display.as_ptr(), @@ -296,6 +372,8 @@ impl XSurface { return self.buffers[buffer_index].as_ref(); } // The buffer_index is valid and the buffer was never created, so we create it now. + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { // The docs for XShmCreateImage imply that XShmSegmentInfo must be allocated to live at // least as long as the XImage, which probably means it can't move either. Use a Box in @@ -405,6 +483,7 @@ impl GpuDisplaySurface for XSurface { impl Drop for XSurface { fn drop(&mut self) { + // SAFETY: // Safe given it should always be of the correct type. 
unsafe { xlib::XFreeGC(self.display.as_ptr(), self.gc); @@ -432,6 +511,8 @@ impl DisplayX { let keycode_translator = KeycodeTranslator::new(KeycodeTypes::XkbScancode); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { // Open the display let display = match NonNull::new(xlib::XOpenDisplay( @@ -508,7 +589,11 @@ impl DisplayX { impl DisplayT for DisplayX { fn pending_events(&self) -> bool { - unsafe { xlib::XPending(self.display.as_ptr()) != 0 } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + xlib::XPending(self.display.as_ptr()) != 0 + } } fn flush(&self) { @@ -621,6 +706,8 @@ impl DisplayT for DisplayX { return Err(GpuDisplayError::Unsupported); } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { let depth = xlib::XDefaultDepthOfScreen(self.screen.as_ptr()) as u32; diff --git a/hypervisor/src/geniezone/geniezone_sys/aarch64/bindings.rs b/hypervisor/src/geniezone/geniezone_sys/aarch64/bindings.rs index dd546aba90..6406c1a11f 100644 --- a/hypervisor/src/geniezone/geniezone_sys/aarch64/bindings.rs +++ b/hypervisor/src/geniezone/geniezone_sys/aarch64/bindings.rs @@ -1,6 +1,7 @@ /* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/hypervisor/src/geniezone/mod.rs b/hypervisor/src/geniezone/mod.rs index 3a5042b05a..b80d87364b 100644 --- a/hypervisor/src/geniezone/mod.rs +++ b/hypervisor/src/geniezone/mod.rs @@ -85,6 +85,7 @@ use crate::PSCI_0_2; impl Geniezone { /// Get the size of guest physical addresses (IPA) in bits. 
pub fn get_guest_phys_addr_bits(&self) -> u8 { + // SAFETY: // Safe because we know self is a real geniezone fd match unsafe { ioctl_with_val( @@ -105,6 +106,7 @@ impl GeniezoneVm { pub fn init_arch(&self, cfg: &Config) -> Result<()> { #[cfg(target_arch = "aarch64")] if cfg.mte { + // SAFETY: // Safe because it does not take pointer arguments. unsafe { self.ctrl_geniezone_enable_capability(GeniezoneCap::ArmMte, &[0, 0, 0, 0, 0]) @@ -134,6 +136,7 @@ impl GeniezoneVm { } fn get_protected_vm_info(&self) -> Result { + // SAFETY: // Safe because we allocated the struct and we know the kernel won't write beyond the end of // the struct or keep a pointer to it. let cap: gzvm_enable_cap = unsafe { @@ -146,6 +149,7 @@ impl GeniezoneVm { } fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> { + // SAFETY: // Safe because none of the args are pointers. unsafe { self.ctrl_geniezone_enable_capability( @@ -196,6 +200,9 @@ impl VmAArch64 for GeniezoneVm { dtb_addr: fdt_address.offset(), dtb_size: fdt_size.try_into().unwrap(), }; + // SAFETY: + // Safe because we allocated the struct and we know the kernel will modify exactly the size + // of the struct. let ret = unsafe { ioctl_with_ref(self, GZVM_SET_DTB_CONFIG(), &dtb_config) }; if ret == 0 { Ok(()) @@ -221,6 +228,7 @@ impl GeniezoneVcpu { .try_into() .expect("can't represent usize as u64"), }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, GZVM_SET_ONE_REG(), &onereg) }; @@ -249,6 +257,7 @@ impl GeniezoneVcpu { .expect("can't represent usize as u64"), }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. 
let ret = unsafe { ioctl_with_ref(self, GZVM_GET_ONE_REG(), &onereg) }; @@ -485,6 +494,7 @@ impl VcpuAArch64 for GeniezoneVcpu { // Wrapper around GZVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping // from guest physical to host user pages. // +// SAFETY: // Safe when the guest regions are guaranteed not to overlap. unsafe fn set_user_memory_region( descriptor: &SafeDescriptor, @@ -535,14 +545,16 @@ pub enum GeniezoneCap { impl Geniezone { pub fn new_with_path(device_path: &Path) -> Result { - // Open calls are safe because we give a nul-terminated string and verify the result. let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap(); + // SAFETY: + // Open calls are safe because we give a nul-terminated string and verify the result. let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) }; if ret < 0 { return errno_result(); } - // Safe because we verify that ret is valid and we own the fd. Ok(Geniezone { + // SAFETY: + // Safe because we verify that ret is valid and we own the fd. geniezone: unsafe { SafeDescriptor::from_raw_descriptor(ret) }, }) } @@ -598,12 +610,14 @@ pub struct GeniezoneVm { impl GeniezoneVm { /// Constructs a new `GeniezoneVm` using the given `Geniezone` instance. pub fn new(geniezone: &Geniezone, guest_mem: GuestMemory, cfg: Config) -> Result { + // SAFETY: // Safe because we know gzvm is a real gzvm fd as this module is the only one that can make // gzvm objects. let ret = unsafe { ioctl(geniezone, GZVM_CREATE_VM()) }; if ret < 0 { return errno_result(); } + // SAFETY: // Safe because we verify that ret is valid and we own the fd. 
let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; for region in guest_mem.regions() { @@ -612,8 +626,9 @@ impl GeniezoneVm { MemoryRegionPurpose::ProtectedFirmwareRegion => GZVM_USER_MEM_REGION_PROTECT_FW, MemoryRegionPurpose::StaticSwiotlbRegion => GZVM_USER_MEM_REGION_STATIC_SWIOTLB, }; + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guaranteed not to overlap. set_user_memory_region( &vm_descriptor, region.index as MemSlot, @@ -642,14 +657,16 @@ impl GeniezoneVm { // run is a data stucture shared with ko and geniezone let run_mmap_size = self.geniezone.get_vcpu_mmap_size()?; - // Safe because we know that our file is a VM fd and we verify the return result. let fd = + // SAFETY: + // Safe because we know that our file is a VM fd and we verify the return result. unsafe { ioctl_with_val(self, GZVM_CREATE_VCPU(), c_ulong::try_from(id).unwrap()) }; if fd < 0 { return errno_result(); } + // SAFETY: // Wrap the vcpu now in case the following ? returns early. This is safe because we verified // the value of the fd and we own the fd. let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; @@ -671,6 +688,7 @@ impl GeniezoneVm { /// /// See the documentation on the GZVM_CREATE_IRQCHIP ioctl. pub fn create_irq_chip(&self) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl(self, GZVM_CREATE_IRQCHIP()) }; if ret == 0 { @@ -686,6 +704,7 @@ impl GeniezoneVm { irq_level.__bindgen_anon_1.irq = irq; irq_level.level = active as u32; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. 
let ret = unsafe { ioctl_with_ref(self, GZVM_IRQ_LINE(), &irq_level) }; @@ -715,6 +734,7 @@ impl GeniezoneVm { irqfd.resamplefd = r_evt.as_raw_descriptor() as u32; } + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, GZVM_IRQFD(), &irqfd) }; @@ -737,6 +757,7 @@ impl GeniezoneVm { flags: GZVM_IRQFD_FLAG_DEASSIGN, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, GZVM_IRQFD(), &irqfd) }; @@ -794,6 +815,7 @@ impl GeniezoneVm { flags, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, GZVM_IOEVENTFD(), &ioeventfd) }; @@ -806,9 +828,10 @@ impl GeniezoneVm { /// Checks whether a particular GZVM-specific capability is available for this VM. fn check_raw_capability(&self, capability: GeniezoneCap) -> bool { + let cap: u64 = capability as u64; + // SAFETY: // Safe because we know that our file is a GZVM fd, and if the cap is invalid GZVM assumes // it's an unavailable extension and returns 0. - let cap: u64 = capability as u64; unsafe { ioctl_with_ref(self, GZVM_CHECK_EXTENSION(), &cap); } @@ -843,6 +866,9 @@ impl GeniezoneVm { } pub fn create_geniezone_device(&self, dev: gzvm_create_device) -> Result<()> { + // SAFETY: + // Safe because we allocated the struct and we know the kernel will modify exactly the size + // of the struct and the return value is checked. 
let ret = unsafe { base::ioctl_with_ref(self, GZVM_CREATE_DEVICE(), &dev) }; if ret == 0 { Ok(()) @@ -922,6 +948,7 @@ impl Vm for GeniezoneVm { }; let flags = 0; + // SAFETY: // Safe because we check that the given guest address is valid and has no overlaps. We also // know that the pointer and size are correct because the MemoryMapping interface ensures // this. We take ownership of the memory mapping so that it won't be unmapped until the slot @@ -964,6 +991,7 @@ impl Vm for GeniezoneVm { if !regions.contains_key(&slot) { return Err(Error::new(ENOENT)); } + // SAFETY: // Safe because the slot is checked against the list of memory slots. unsafe { set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut(), 0)?; @@ -1104,6 +1132,8 @@ impl Vcpu for GeniezoneVcpu { #[allow(clippy::cast_ptr_alignment)] fn set_immediate_exit(&self, exit: bool) { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gzvm_vcpu_run) }; run.immediate_exit = exit as u8; } @@ -1128,12 +1158,14 @@ impl Vcpu for GeniezoneVcpu { // The pointer is page aligned so casting to a different type is well defined, hence the clippy // allow attribute. fn run(&mut self) -> Result { + // SAFETY: // Safe because we know that our file is a VCPU fd and we verify the return result. let ret = unsafe { ioctl_with_val(self, GZVM_RUN(), self.run_mmap.as_ptr() as u64) }; if ret != 0 { return errno_result(); } + // SAFETY: // Safe because we know we mapped enough memory to hold the gzvm_vcpu_run struct because the // kernel told us how large it was. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gzvm_vcpu_run) }; @@ -1145,6 +1177,7 @@ impl Vcpu for GeniezoneVcpu { GZVM_EXIT_EXCEPTION => Err(Error::new(EINVAL)), GZVM_EXIT_DEBUG => Ok(VcpuExit::Debug), GZVM_EXIT_FAIL_ENTRY => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let hardware_entry_failure_reason = unsafe { @@ -1157,6 +1190,9 @@ impl Vcpu for GeniezoneVcpu { }) } GZVM_EXIT_SYSTEM_EVENT => { + // SAFETY: + // Safe because the exit_reason (which comes from the kernel) told us which + // union field to use. let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ }; match event_type { GZVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown), @@ -1176,6 +1212,7 @@ impl Vcpu for GeniezoneVcpu { } fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the gzvm_vcpu_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. @@ -1183,9 +1220,9 @@ impl Vcpu for GeniezoneVcpu { // Verify that the handler is called in the right context. assert!(run.exit_reason == GZVM_EXIT_MMIO); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
- let mmio = unsafe { &mut run.__bindgen_anon_1.mmio }; let address = mmio.phys_addr; diff --git a/hypervisor/src/gunyah/gunyah_sys/bindings.rs b/hypervisor/src/gunyah/gunyah_sys/bindings.rs index 8b9fc3b895..c7842c4c97 100644 --- a/hypervisor/src/gunyah/gunyah_sys/bindings.rs +++ b/hypervisor/src/gunyah/gunyah_sys/bindings.rs @@ -1,6 +1,7 @@ /* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/hypervisor/src/gunyah/mod.rs b/hypervisor/src/gunyah/mod.rs index 05b00e50a7..868e48d888 100644 --- a/hypervisor/src/gunyah/mod.rs +++ b/hypervisor/src/gunyah/mod.rs @@ -60,14 +60,16 @@ impl AsRawDescriptor for Gunyah { impl Gunyah { pub fn new_with_path(device_path: &Path) -> Result { - // Open calls are safe because we give a nul-terminated string and verify the result. let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap(); + // SAFETY: + // Open calls are safe because we give a nul-terminated string and verify the result. let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) }; if ret < 0 { return errno_result(); } - // Safe because we verify that ret is valid and we own the fd. Ok(Gunyah { + // SAFETY: + // Safe because we verify that ret is valid and we own the fd. gunyah: unsafe { SafeDescriptor::from_raw_descriptor(ret) }, }) } @@ -135,6 +137,7 @@ unsafe fn android_lend_user_memory_region( // Wrapper around GH_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping // from guest physical to host user pages. // +// SAFETY: // Safe when the guest regions are guaranteed not to overlap. 
unsafe fn set_user_memory_region( vm: &SafeDescriptor, @@ -192,6 +195,7 @@ impl AsRawDescriptor for GunyahVm { impl GunyahVm { pub fn new(gh: &Gunyah, guest_mem: GuestMemory, cfg: Config) -> Result { + // SAFETY: // Safe because we know gunyah is a real gunyah fd as this module is the only one that can // make Gunyah objects. let ret = unsafe { ioctl_with_val(gh, GH_CREATE_VM(), 0 as c_ulong) }; @@ -199,6 +203,7 @@ impl GunyahVm { return errno_result(); } + // SAFETY: // Safe because we verify that ret is valid and we own the fd. let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; for region in guest_mem.regions() { @@ -214,8 +219,9 @@ impl GunyahVm { false }; if lend { + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guarnteed not to overlap. android_lend_user_memory_region( &vm_descriptor, region.index as MemSlot, @@ -226,8 +232,9 @@ impl GunyahVm { )?; } } else { + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guarnteed not to overlap. set_user_memory_region( &vm_descriptor, region.index as MemSlot, @@ -263,16 +270,19 @@ impl GunyahVm { arg: &gh_fn_vcpu_arg as *const gh_fn_vcpu_arg as u64, }; + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let fd = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) }; if fd < 0 { return errno_result(); } + // SAFETY: // Wrap the vcpu now in case the following ? returns early. This is safe because we verified // the value of the fd and we own the fd. 
let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; + // SAFETY: // Safe because we know this is a Gunyah VCPU let res = unsafe { ioctl(&vcpu, GH_VCPU_MMAP_SIZE()) }; if res < 0 { @@ -304,10 +314,12 @@ impl GunyahVm { let function_desc = gh_fn_desc { type_: GH_FN_IRQFD, arg_size: size_of::() as u32, + // SAFETY: // Safe because kernel is expecting pointer with non-zero arg_size arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64, }; + // SAFETY: safe because the return value is checked. let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) }; if ret == 0 { self.routes @@ -332,6 +344,7 @@ impl GunyahVm { arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64, }; + // SAFETY: safe because memory is not modified and the return value is checked. let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION(), &function_desc) }; if ret == 0 { Ok(()) @@ -361,6 +374,7 @@ impl GunyahVm { size: fdt_size.try_into().unwrap(), }; + // SAFETY: // Safe because we know this is a Gunyah VM let ret = unsafe { ioctl_with_ref(self, GH_VM_SET_DTB_CONFIG(), &dtb_config) }; if ret == 0 { @@ -376,6 +390,7 @@ impl GunyahVm { size: fw_size, }; + // SAFETY: // Safe because we know this is a Gunyah VM let ret = unsafe { ioctl_with_ref(self, GH_VM_ANDROID_SET_FW_CONFIG(), &fw_config) }; if ret == 0 { @@ -386,6 +401,7 @@ impl GunyahVm { } fn start(&self) -> Result<()> { + // SAFETY: safe because memory is not modified and the return value is checked. let ret = unsafe { ioctl(self, GH_VM_START()) }; if ret == 0 { Ok(()) @@ -457,6 +473,7 @@ impl Vm for GunyahVm { None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot, }; + // SAFETY: safe because memory is not modified and the return value is checked. 
let res = unsafe { set_user_memory_region( &self.vm, @@ -552,6 +569,7 @@ impl Vm for GunyahVm { arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64, }; + // SAFETY: safe because memory is not modified and the return value is checked. let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) }; if ret == 0 { Ok(()) @@ -583,6 +601,7 @@ impl Vm for GunyahVm { arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64, }; + // SAFETY: safe because memory is not modified and the return value is checked. let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION(), &function_desc) }; if ret == 0 { Ok(()) @@ -695,18 +714,23 @@ impl Vcpu for GunyahVcpu { } fn run(&mut self) -> Result { + // SAFETY: // Safe because we know our file is a VCPU fd and we verify the return result. let ret = unsafe { ioctl(self, GH_VCPU_RUN()) }; if ret != 0 { return errno_result(); } + // SAFETY: // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct // because the kernel told us how large it is. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) }; match run.exit_reason { GH_VCPU_EXIT_MMIO => Ok(VcpuExit::Mmio), GH_VCPU_EXIT_STATUS => { + // SAFETY: + // Safe because the exit_reason (which comes from the kernel) told us which + // union field to use. let status = unsafe { &mut run.__bindgen_anon_1.status }; match status.status { GH_VM_STATUS_GH_VM_STATUS_LOAD_FAILED => Ok(VcpuExit::FailEntry { @@ -748,6 +772,7 @@ impl Vcpu for GunyahVcpu { } fn set_immediate_exit(&self, exit: bool) { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. 
@@ -764,12 +789,14 @@ impl Vcpu for GunyahVcpu { } fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == GH_VCPU_EXIT_MMIO); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let mmio = unsafe { &mut run.__bindgen_anon_1.mmio }; diff --git a/hypervisor/src/haxm.rs b/hypervisor/src/haxm.rs index c29e6b7674..d042aa77e9 100644 --- a/hypervisor/src/haxm.rs +++ b/hypervisor/src/haxm.rs @@ -145,19 +145,24 @@ impl HypervisorX86_64 for Haxm { (Feature80000001Ecx::LAHF | Feature80000001Ecx::ABM | Feature80000001Ecx::PREFETCHW) .bits(); + // SAFETY: trivially safe let result = unsafe { __cpuid(0x1) }; // Filter HAXM supported cpuids by host-supported cpuids supported_features_1_ecx &= result.ecx; supported_features_1_edx &= result.edx; + // SAFETY: trivially safe let result = unsafe { __cpuid(0x80000001) }; supported_features_80000001_edx &= result.edx; supported_features_80000001_ecx &= result.ecx; + // SAFETY: trivially safe let cpuid_7 = unsafe { __cpuid(0x7) }; + // SAFETY: trivially safe let cpuid_15 = unsafe { __cpuid(0x15) }; + // SAFETY: trivially safe let cpuid_16 = unsafe { __cpuid(0x16) }; Ok(CpuId { diff --git a/hypervisor/src/haxm/haxm_sys/bindings.rs b/hypervisor/src/haxm/haxm_sys/bindings.rs index a71d6b64b0..53047d2f26 100644 --- a/hypervisor/src/haxm/haxm_sys/bindings.rs +++ b/hypervisor/src/haxm/haxm_sys/bindings.rs @@ -24,6 +24,7 @@ See instructions from: https://rust-lang.github.io/rust-bindgen/print.html Suppress warnings from unaligned_references and deref_nullptr */ 
#![allow(unaligned_references, deref_nullptr)] +#![allow(clippy::undocumented_unsafe_blocks)] #[repr(C)] #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] diff --git a/hypervisor/src/haxm/vcpu.rs b/hypervisor/src/haxm/vcpu.rs index 63a35989c7..ac18a75f0b 100644 --- a/hypervisor/src/haxm/vcpu.rs +++ b/hypervisor/src/haxm/vcpu.rs @@ -88,7 +88,11 @@ pub struct HaxmVcpu { pub(super) io_buffer: *mut c_void, } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Send for HaxmVcpu {} +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe impl Sync for HaxmVcpu {} impl AsRawDescriptor for HaxmVcpu { @@ -101,6 +105,7 @@ impl HaxmVcpu { fn get_vcpu_state(&self) -> Result { let mut state = vcpu_state_t::default(); + // SAFETY: trivially safe with return value checked. let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_GET_REGS(), &mut state) }; if ret != 0 { return errno_result(); @@ -119,6 +124,7 @@ impl HaxmVcpu { } fn set_vcpu_state(&self, state: &mut VcpuState) -> Result<()> { + // SAFETY: trivially safe with return value checked. let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_SET_REGS(), &mut state.state) }; if ret != 0 { return errno_result(); @@ -156,6 +162,7 @@ impl Vcpu for HaxmVcpu { /// Sets the bit that requests an immediate exit. fn set_immediate_exit(&self, exit: bool) { + // SAFETY: // Safe because we know the tunnel is a pointer to a hax_tunnel and we know its size. // Crosvm's HAXM implementation does not use the _exit_reason, so it's fine if we // overwrite it. @@ -182,16 +189,18 @@ impl Vcpu for HaxmVcpu { /// call `handle_fn` with the respective IoOperation to perform the mmio read or write, /// and set the return data in the vcpu so that the vcpu can resume running. 
fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the // kernel told us how large it was. // Verify that the handler is called for mmio context only. unsafe { assert!((*self.tunnel)._exit_status == HAX_EXIT_FAST_MMIO); } - // Safe because the exit_reason (which comes from the kernel) told us which - // union field to use. let mmio = self.io_buffer as *mut hax_fastmmio; let (address, size, direction) = + // SAFETY: + // Safe because the exit_reason (which comes from the kernel) told us which + // union field to use. unsafe { ((*mmio).gpa, (*mmio).size as usize, (*mmio).direction) }; match direction { @@ -201,9 +210,10 @@ impl Vcpu for HaxmVcpu { size, operation: IoOperation::Read, }) { + let data = u64::from_ne_bytes(data); + // SAFETY: // Safe because we know this is an mmio read, so we need to put data into the // "value" field of the hax_fastmmio. - let data = u64::from_ne_bytes(data); unsafe { (*mmio).__bindgen_anon_1.value = data; } @@ -211,6 +221,7 @@ impl Vcpu for HaxmVcpu { Ok(()) } HAX_EXIT_DIRECTION_MMIO_WRITE => { + // SAFETY: // safe because we trust haxm to fill in the union properly. let data = unsafe { (*mmio).__bindgen_anon_1.value }; handle_fn(IoParams { @@ -233,12 +244,14 @@ impl Vcpu for HaxmVcpu { /// and set the return data in the vcpu so that the vcpu can resume running. #[allow(clippy::cast_ptr_alignment)] fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the // kernel told us how large it was. // Verify that the handler is called for io context only. unsafe { assert!((*self.tunnel)._exit_status == HAX_EXIT_IO); } + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let io = unsafe { (*self.tunnel).__bindgen_anon_1.io }; @@ -251,6 +264,7 @@ impl Vcpu for HaxmVcpu { size, operation: IoOperation::Read, }) { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us that // this is port io, where the iobuf can be treated as a *u8 unsafe { @@ -261,6 +275,7 @@ impl Vcpu for HaxmVcpu { } HAX_EXIT_DIRECTION_PIO_OUT => { let mut data = [0; 8]; + // SAFETY: // safe because we check the size, from what the kernel told us is the max to copy. unsafe { copy_nonoverlapping( @@ -304,12 +319,14 @@ impl Vcpu for HaxmVcpu { // The pointer is page aligned so casting to a different type is well defined, hence the clippy // allow attribute. fn run(&mut self) -> Result { - // Safe because we know that our file is a VCPU fd and we verify the return result. + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl(self, HAX_VCPU_IOCTL_RUN()) }; if ret != 0 { return errno_result(); } + // SAFETY: // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the // kernel told us how large it was. let exit_status = unsafe { (*self.tunnel)._exit_status }; @@ -333,6 +350,7 @@ impl VcpuX86_64 for HaxmVcpu { /// Sets or clears the flag that requests the VCPU to exit when it becomes possible to inject /// interrupts into the guest. fn set_interrupt_window_requested(&self, requested: bool) { + // SAFETY: // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the // kernel told us how large it was. unsafe { @@ -342,6 +360,7 @@ impl VcpuX86_64 for HaxmVcpu { /// Checks if we can inject an interrupt into the VCPU. fn ready_for_interrupt(&self) -> bool { + // SAFETY: // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the // kernel told us how large it was. 
unsafe { (*self.tunnel).ready_for_interrupt_injection != 0 } @@ -349,6 +368,8 @@ impl VcpuX86_64 for HaxmVcpu { /// Injects interrupt vector `irq` into the VCPU. fn interrupt(&self, irq: u32) -> Result<()> { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_INTERRUPT(), &irq) }; if ret != 0 { return errno_result(); @@ -391,6 +412,8 @@ impl VcpuX86_64 for HaxmVcpu { /// Gets the VCPU FPU registers. fn get_fpu(&self) -> Result { let mut fpu = fx_layout::default(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU(), &mut fpu) }; if ret != 0 { @@ -403,6 +426,8 @@ impl VcpuX86_64 for HaxmVcpu { /// Sets the VCPU FPU registers. fn set_fpu(&self, fpu: &Fpu) -> Result<()> { let mut current_fpu = fx_layout::default(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU(), &mut current_fpu) }; if ret != 0 { @@ -415,6 +440,8 @@ impl VcpuX86_64 for HaxmVcpu { // fpu state's mxcsr_mask matches its current value new_fpu.mxcsr_mask = current_fpu.mxcsr_mask; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_SET_FPU(), &new_fpu) }; if ret != 0 { @@ -483,6 +510,8 @@ impl VcpuX86_64 for HaxmVcpu { // Copy chunk into msr_data msr_data.entries[..chunk_size].copy_from_slice(&hax_chunk); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS(), &mut msr_data) }; if ret != 0 { return errno_result(); @@ -518,6 +547,8 @@ impl VcpuX86_64 for HaxmVcpu { // Copy chunk into msr_data msr_data.entries[..chunk_size].copy_from_slice(&hax_chunk); + // TODO(b/315998194): Add safety comment + 
#[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS(), &mut msr_data) }; if ret != 0 { return errno_result(); @@ -532,11 +563,15 @@ impl VcpuX86_64 for HaxmVcpu { let total = cpuid.cpu_id_entries.len(); let mut hax = vec_with_array_field::(total); hax[0].total = total as u32; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let entries = unsafe { hax[0].entries.as_mut_slice(total) }; for (i, e) in cpuid.cpu_id_entries.iter().enumerate() { entries[i] = hax_cpuid_entry::from(e); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_ptr_sized( self, @@ -607,6 +642,8 @@ struct VcpuState { impl VcpuState { fn get_regs(&self) -> Regs { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { Regs { rax: self @@ -795,6 +832,8 @@ impl VcpuState { impl From<&segment_desc_t> for Segment { fn from(item: &segment_desc_t) -> Self { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { Segment { base: item.base, @@ -822,6 +861,8 @@ impl From<&Segment> for segment_desc_t { ..Default::default() }; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { segment .__bindgen_anon_1 @@ -888,7 +929,9 @@ impl From<&fx_layout> for Fpu { fsw: item.fsw, ftwx: item.ftw, last_opcode: item.fop, + // SAFETY: trivially safe last_ip: unsafe { item.__bindgen_anon_1.fpu_ip }, + // SAFETY: trivially safe last_dp: unsafe { item.__bindgen_anon_2.fpu_dp }, xmm: [[0; 16]; 16], mxcsr: item.mxcsr, diff --git a/hypervisor/src/haxm/vm.rs b/hypervisor/src/haxm/vm.rs index ef9ee8b5d8..5061de3efd 100644 --- a/hypervisor/src/haxm/vm.rs +++ b/hypervisor/src/haxm/vm.rs @@ -74,6 +74,7 @@ impl HaxmVm { /// Constructs a new `HaxmVm` using the given `Haxm` instance. 
pub fn new(haxm: &Haxm, guest_mem: GuestMemory) -> Result { let mut vm_id: u32 = 0; + // SAFETY: // Safe because we know descriptor is a real haxm descriptor as this module is the only // one that can make Haxm objects. let ret = unsafe { ioctl_with_mut_ref(haxm, HAX_IOCTL_CREATE_VM(), &mut vm_id) }; @@ -85,8 +86,9 @@ impl HaxmVm { let vm_descriptor = open_haxm_vm_device(USE_GHAXM.load(Ordering::Relaxed), vm_id)?; for region in guest_mem.regions() { + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guaranteed not to overlap. set_user_memory_region( &vm_descriptor, false, @@ -111,6 +113,8 @@ impl HaxmVm { pub fn check_raw_capability(&self, cap: u32) -> bool { let mut capability_info = hax_capabilityinfo::default(); let ret = + // SAFETY: + // Safe because we know that our file is a VM fd and we verify the return result. unsafe { ioctl_with_mut_ref(&self.haxm, HAX_IOCTL_CAPABILITY(), &mut capability_info) }; if ret != 0 { @@ -136,6 +140,8 @@ impl HaxmVm { let wstring = &win32_wide_string(path); log_file.path[..wstring.len()].clone_from_slice(wstring); + // SAFETY: + // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_REGISTER_LOG_FILE(), &log_file) }; if ret != 0 { @@ -184,6 +190,8 @@ unsafe fn set_user_memory_region( ..Default::default() }; + // SAFETY: + // Safe because we know that our file is a VM fd and we verify the return result. let ret = ioctl_with_ref(descriptor, HAX_VM_IOCTL_SET_RAM2(), &ram_info); if ret != 0 { return errno_result(); @@ -235,6 +243,7 @@ impl Vm for HaxmVm { None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot, }; + // SAFETY: // Safe because we check that the given guest address is valid and has no overlaps. We also // know that the pointer and size are correct because the MemoryMapping interface ensures // this. 
We take ownership of the memory mapping so that it won't be unmapped until the slot @@ -273,6 +282,7 @@ impl Vm for HaxmVm { let mut regions = self.mem_regions.lock(); if let Some((guest_addr, mem)) = regions.get(&slot) { + // SAFETY: // Safe because the slot is checked against the list of memory slots. unsafe { set_user_memory_region( @@ -416,6 +426,7 @@ impl VmX86_64 for HaxmVm { } fn create_vcpu(&self, id: usize) -> Result> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let fd = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_VCPU_CREATE(), &(id as u32)) }; if fd < 0 { @@ -427,6 +438,7 @@ impl VmX86_64 for HaxmVm { let mut tunnel_info = hax_tunnel_info::default(); + // SAFETY: // Safe because we created tunnel_info and we check the return code for errors let ret = unsafe { ioctl_with_mut_ref(&descriptor, HAX_VCPU_IOCTL_SETUP_TUNNEL(), &mut tunnel_info) diff --git a/hypervisor/src/haxm/win.rs b/hypervisor/src/haxm/win.rs index c12d77d20d..f2730f73e2 100644 --- a/hypervisor/src/haxm/win.rs +++ b/hypervisor/src/haxm/win.rs @@ -17,6 +17,7 @@ use winapi::um::winnt::GENERIC_READ; use winapi::um::winnt::GENERIC_WRITE; pub(super) fn open_haxm_device(use_ghaxm: bool) -> Result { + // SAFETY: // Open calls are safe because we give a constant nul-terminated string and verify the // result. let ret = unsafe { @@ -39,6 +40,7 @@ pub(super) fn open_haxm_device(use_ghaxm: bool) -> Result { if ret == INVALID_HANDLE_VALUE { return errno_result(); } + // SAFETY: // Safe because we verify that ret is valid and we own the fd. Ok(unsafe { SafeDescriptor::from_raw_descriptor(ret) }) } @@ -49,6 +51,7 @@ pub(super) fn open_haxm_vm_device(use_ghaxm: bool, vm_id: u32) -> Result Result Result { + // SAFETY: // Safe because we know self is a real kvm fd let ipa_size = match unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), KVM_CAP_ARM_VM_IPA_SIZE.into()) @@ -81,6 +82,7 @@ impl Kvm { /// Get the size of guest physical addresses (IPA) in bits. 
pub fn get_guest_phys_addr_bits(&self) -> u8 { + // SAFETY: // Safe because we know self is a real kvm fd match unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), KVM_CAP_ARM_VM_IPA_SIZE.into()) } { @@ -96,6 +98,7 @@ impl KvmVm { pub fn init_arch(&self, cfg: &Config) -> Result<()> { #[cfg(target_arch = "aarch64")] if cfg.mte { + // SAFETY: // Safe because it does not take pointer arguments. unsafe { self.enable_raw_capability(KvmCap::ArmMte, 0, &[0, 0, 0, 0])? } } @@ -147,6 +150,7 @@ impl KvmVm { firmware_size: 0, reserved: [0; 7], }; + // SAFETY: // Safe because we allocated the struct and we know the kernel won't write beyond the end of // the struct or keep a pointer to it. unsafe { @@ -160,6 +164,7 @@ impl KvmVm { } fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress) -> Result<()> { + // SAFETY: // Safe because none of the args are pointers. unsafe { self.enable_raw_capability( @@ -248,6 +253,7 @@ impl KvmVcpu { .try_into() .expect("can't represent usize as u64"), }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG(), &onereg) }; @@ -272,6 +278,7 @@ impl KvmVcpu { .expect("can't represent usize as u64"), }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, KVM_GET_ONE_REG(), &onereg) }; @@ -530,6 +537,7 @@ impl VcpuAArch64 for KvmVcpu { target: KVM_ARM_TARGET_GENERIC_V8, features: [0; 7], }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will write exactly the size // of the struct. 
let ret = unsafe { ioctl_with_mut_ref(&self.vm, KVM_ARM_PREFERRED_TARGET(), &mut kvi) }; @@ -546,8 +554,9 @@ impl VcpuAArch64 for KvmVcpu { kvi.features[0] |= 1 << shift; } - // Safe because we know self.vm is a real kvm fd let check_extension = |ext: u32| -> bool { + // SAFETY: + // Safe because we know self.vm is a real kvm fd unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION(), ext.into()) == 1 } }; if check_extension(KVM_CAP_ARM_PTRAUTH_ADDRESS) @@ -557,6 +566,7 @@ impl VcpuAArch64 for KvmVcpu { kvi.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC; } + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, KVM_ARM_VCPU_INIT(), &kvi) }; @@ -579,6 +589,7 @@ impl VcpuAArch64 for KvmVcpu { addr: irq_addr as u64, flags: 0, }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_HAS_DEVICE_ATTR(), &irq_attr) }; @@ -586,6 +597,7 @@ impl VcpuAArch64 for KvmVcpu { return errno_result(); } + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR(), &irq_attr) }; @@ -599,6 +611,7 @@ impl VcpuAArch64 for KvmVcpu { addr: 0, flags: 0, }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR(), &init_attr) }; @@ -618,6 +631,7 @@ impl VcpuAArch64 for KvmVcpu { addr: 0, flags: 0, }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. 
let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_HAS_DEVICE_ATTR(), &pvtime_attr) }; @@ -636,6 +650,7 @@ impl VcpuAArch64 for KvmVcpu { flags: 0, }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_SET_DEVICE_ATTR(), &pvtime_attr) }; @@ -674,6 +689,7 @@ impl VcpuAArch64 for KvmVcpu { #[cfg(feature = "gdb")] fn get_max_hw_bps(&self) -> Result { + // SAFETY: // Safe because the kernel will only return the result of the ioctl. let max_hw_bps = unsafe { ioctl_with_val( @@ -727,6 +743,7 @@ impl VcpuAArch64 for KvmVcpu { dbg.arch.dbg_bcr[i] = 0b1111_11_1; } + // SAFETY: // Safe because the kernel won't read past the end of the kvm_guest_debug struct. let ret = unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG(), &dbg) }; if ret == 0 { diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs index 162acba328..c12bed1066 100644 --- a/hypervisor/src/kvm/mod.rs +++ b/hypervisor/src/kvm/mod.rs @@ -92,6 +92,7 @@ use crate::VmCap; // Wrapper around KVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping // from guest physical to host user pages. // +// SAFETY: // Safe when the guest regions are guaranteed not to overlap. unsafe fn set_user_memory_region( descriptor: &SafeDescriptor, @@ -141,15 +142,18 @@ pub type KvmCap = kvm::Cap; impl Kvm { pub fn new_with_path(device_path: &Path) -> Result { - // Open calls are safe because we give a nul-terminated string and verify the result. let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap(); + // SAFETY: + // Open calls are safe because we give a nul-terminated string and verify the result. let ret = unsafe { open64(c_path.as_ptr(), O_RDWR | O_CLOEXEC) }; if ret < 0 { return errno_result(); } + // SAFETY: // Safe because we verify that ret is valid and we own the fd. 
let kvm = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; + // SAFETY: // Safe because we know that the descriptor is valid and we verify the return result. let version = unsafe { ioctl(&kvm, KVM_GET_API_VERSION()) }; if version < 0 { @@ -176,6 +180,7 @@ impl Kvm { /// Gets the size of the mmap required to use vcpu's `kvm_run` structure. pub fn get_vcpu_mmap_size(&self) -> Result { + // SAFETY: // Safe because we know that our file is a KVM fd and we verify the return result. let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE()) }; if res > 0 { @@ -201,6 +206,7 @@ impl Hypervisor for Kvm { fn check_capability(&self, cap: HypervisorCap) -> bool { if let Ok(kvm_cap) = KvmCap::try_from(cap) { + // SAFETY: // this ioctl is safe because we know this kvm descriptor is valid, // and we are copying over the kvm capability (u32) as a c_ulong value. unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), kvm_cap as c_ulong) == 1 } @@ -224,6 +230,7 @@ pub struct KvmVm { impl KvmVm { /// Constructs a new `KvmVm` using the given `Kvm` instance. pub fn new(kvm: &Kvm, guest_mem: GuestMemory, cfg: Config) -> Result { + // SAFETY: // Safe because we know kvm is a real kvm fd as this module is the only one that can make // Kvm objects. let ret = unsafe { @@ -236,11 +243,13 @@ impl KvmVm { if ret < 0 { return errno_result(); } + // SAFETY: // Safe because we verify that ret is valid and we own the fd. let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) }; for region in guest_mem.regions() { + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guaranteed not to overlap. set_user_memory_region( &vm_descriptor, region.index as MemSlot, @@ -267,12 +276,14 @@ impl KvmVm { pub fn create_kvm_vcpu(&self, id: usize) -> Result { let run_mmap_size = self.kvm.get_vcpu_mmap_size()?; + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. 
let fd = unsafe { ioctl_with_val(self, KVM_CREATE_VCPU(), c_ulong::try_from(id).unwrap()) }; if fd < 0 { return errno_result(); } + // SAFETY: // Wrap the vcpu now in case the following ? returns early. This is safe because we verified // the value of the fd and we own the fd. let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; @@ -302,6 +313,7 @@ impl KvmVm { /// /// See the documentation on the KVM_CREATE_IRQCHIP ioctl. pub fn create_irq_chip(&self) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) }; if ret == 0 { @@ -317,6 +329,7 @@ impl KvmVm { irq_level.__bindgen_anon_1.irq = irq; irq_level.level = active.into(); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) }; @@ -346,6 +359,7 @@ impl KvmVm { irqfd.resamplefd = r_evt.as_raw_descriptor() as u32; } + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) }; @@ -368,6 +382,7 @@ impl KvmVm { flags: KVM_IRQFD_FLAG_DEASSIGN, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) }; @@ -385,6 +400,7 @@ impl KvmVm { vec_with_array_field::(routes.len()); irq_routing[0].nr = routes.len() as u32; + // SAFETY: // Safe because we ensured there is enough space in irq_routing to hold the number of // route entries. 
let irq_routes = unsafe { irq_routing[0].entries.as_mut_slice(routes.len()) }; @@ -392,6 +408,8 @@ impl KvmVm { *irq_route = kvm_irq_routing_entry::from(route); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING(), &irq_routing[0]) }; if ret == 0 { Ok(()) @@ -447,6 +465,7 @@ impl KvmVm { flags, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) }; @@ -459,6 +478,7 @@ impl KvmVm { /// Checks whether a particular KVM-specific capability is available for this VM. pub fn check_raw_capability(&self, capability: KvmCap) -> bool { + // SAFETY: // Safe because we know that our file is a KVM fd, and if the cap is invalid KVM assumes // it's an unavailable extension and returns 0. let ret = unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), capability as c_ulong) }; @@ -495,6 +515,7 @@ impl KvmVm { flags, ..Default::default() }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct, and because we assume the caller has allocated the args appropriately. 
let ret = ioctl_with_ref(self, KVM_ENABLE_CAP(), &kvm_cap); @@ -549,9 +570,13 @@ impl Vm for KvmVm { #[cfg(target_arch = "x86_64")] VmCap::BusLockDetect => { let args = [KVM_BUS_LOCK_DETECTION_EXIT as u64, 0, 0, 0]; - Ok(unsafe { - self.enable_raw_capability(KvmCap::BusLockDetect, _flags, &args) == Ok(()) - }) + Ok( + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + self.enable_raw_capability(KvmCap::BusLockDetect, _flags, &args) == Ok(()) + }, + ) } _ => Ok(false), } @@ -590,6 +615,7 @@ impl Vm for KvmVm { None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot, }; + // SAFETY: // Safe because we check that the given guest address is valid and has no overlaps. We also // know that the pointer and size are correct because the MemoryMapping interface ensures // this. We take ownership of the memory mapping so that it won't be unmapped until the slot @@ -631,6 +657,7 @@ impl Vm for KvmVm { if !regions.contains_key(&slot) { return Err(Error::new(ENOENT)); } + // SAFETY: // Safe because the slot is checked against the list of memory slots. unsafe { set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?; @@ -657,12 +684,16 @@ impl Vm for KvmVm { } }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only write correct // amount of memory to our pointer, and we verify the return result. let ret = unsafe { base::ioctl_with_ref(self, KVM_CREATE_DEVICE(), &device) }; if ret == 0 { - // Safe because we verify that ret is valid and we own the fd. - Ok(unsafe { SafeDescriptor::from_raw_descriptor(device.fd as i32) }) + Ok( + // SAFETY: + // Safe because we verify that ret is valid and we own the fd. 
+ unsafe { SafeDescriptor::from_raw_descriptor(device.fd as i32) }, + ) } else { errno_result() } @@ -681,6 +712,7 @@ impl Vm for KvmVm { ..Default::default() }; dirty_log_kvm.__bindgen_anon_1.dirty_bitmap = dirty_log.as_ptr() as *mut c_void; + // SAFETY: // Safe because the `dirty_bitmap` pointer assigned above is guaranteed to be valid (because // it's from a slice) and we checked that it will be large enough to hold the entire log. let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG(), &dirty_log_kvm) }; @@ -817,6 +849,7 @@ impl Vcpu for KvmVcpu { #[allow(clippy::cast_ptr_alignment)] fn set_immediate_exit(&self, exit: bool) { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. @@ -838,6 +871,7 @@ impl Vcpu for KvmVcpu { // flag to prevent the soft lockup detection from triggering when this vCPU resumes, which // could happen days later in realtime. if self.cap_kvmclock_ctrl { + // SAFETY: // The ioctl is safe because it does not read or write memory in this process. if unsafe { ioctl(self, KVM_KVMCLOCK_CTRL()) } != 0 { return errno_result(); @@ -853,6 +887,7 @@ impl Vcpu for KvmVcpu { args: *args, ..Default::default() }; + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct, and because we assume the caller has allocated the args appropriately. let ret = ioctl_with_ref(self, KVM_ENABLE_CAP(), &kvm_cap); @@ -867,12 +902,14 @@ impl Vcpu for KvmVcpu { // The pointer is page aligned so casting to a different type is well defined, hence the clippy // allow attribute. fn run(&mut self) -> Result { + // SAFETY: // Safe because we know that our file is a VCPU fd and we verify the return result. 
let ret = unsafe { ioctl(self, KVM_RUN()) }; if ret != 0 { return errno_result(); } + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; @@ -880,6 +917,7 @@ impl Vcpu for KvmVcpu { KVM_EXIT_IO => Ok(VcpuExit::Io), KVM_EXIT_MMIO => Ok(VcpuExit::Mmio), KVM_EXIT_IOAPIC_EOI => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let vector = unsafe { run.__bindgen_anon_1.eoi.vector }; @@ -894,6 +932,7 @@ impl Vcpu for KvmVcpu { KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen), KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown), KVM_EXIT_FAIL_ENTRY => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let hardware_entry_failure_reason = unsafe { @@ -920,10 +959,14 @@ impl Vcpu for KvmVcpu { KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch), KVM_EXIT_EPR => Ok(VcpuExit::Epr), KVM_EXIT_SYSTEM_EVENT => { + // SAFETY: // Safe because we know the exit reason told us this union // field is valid let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ }; let event_flags = + // SAFETY: + // Safe because we know the exit reason told us this union + // field is valid unsafe { run.__bindgen_anon_1.system_event.__bindgen_anon_1.flags }; match event_type { KVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown), @@ -939,6 +982,7 @@ impl Vcpu for KvmVcpu { } } KVM_EXIT_X86_RDMSR => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let msr = unsafe { &mut run.__bindgen_anon_1.msr }; @@ -948,6 +992,7 @@ impl Vcpu for KvmVcpu { Ok(VcpuExit::RdMsr { index }) } KVM_EXIT_X86_WRMSR => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let msr = unsafe { &mut run.__bindgen_anon_1.msr }; @@ -989,12 +1034,14 @@ impl Vcpu for KvmVcpu { } fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == KVM_EXIT_MMIO); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let mmio = unsafe { &mut run.__bindgen_anon_1.mmio }; @@ -1020,17 +1067,20 @@ impl Vcpu for KvmVcpu { } fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == KVM_EXIT_IO); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let io = unsafe { run.__bindgen_anon_1.io }; let size = usize::from(io.size); + // SAFETY: // The data_offset is defined by the kernel to be some number of bytes into the kvm_run // structure, which we have fully mmap'd. 
let mut data_ptr = unsafe { (run as *mut kvm_run as *mut u8).add(io.data_offset as usize) }; @@ -1043,6 +1093,8 @@ impl Vcpu for KvmVcpu { size, operation: IoOperation::Read, }) { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { copy_nonoverlapping(data.as_ptr(), data_ptr, size); data_ptr = data_ptr.add(size); @@ -1056,6 +1108,8 @@ impl Vcpu for KvmVcpu { KVM_EXIT_IO_OUT => { for _ in 0..io.count { let mut data = [0; 8]; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { copy_nonoverlapping(data_ptr, data.as_mut_ptr(), min(size, data.len())); data_ptr = data_ptr.add(size); @@ -1076,16 +1130,20 @@ impl Vcpu for KvmVcpu { &self, handle_fn: &mut dyn FnMut(HypervHypercall) -> u64, ) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == KVM_EXIT_HYPERV); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let hyperv = unsafe { &mut run.__bindgen_anon_1.hyperv }; match hyperv.type_ { KVM_EXIT_HYPERV_SYNIC => { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let synic = unsafe { &hyperv.u.synic }; handle_fn(HypervHypercall::HypervSynic { msr: synic.msr, @@ -1096,6 +1154,8 @@ impl Vcpu for KvmVcpu { Ok(()) } KVM_EXIT_HYPERV_HCALL => { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let hcall = unsafe { &mut hyperv.u.hcall }; hcall.result = handle_fn(HypervHypercall::HypervHcall { input: hcall.input, @@ -1108,11 +1168,13 @@ impl Vcpu for KvmVcpu { } fn handle_rdmsr(&self, data: u64) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == KVM_EXIT_X86_RDMSR); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let msr = unsafe { &mut run.__bindgen_anon_1.msr }; @@ -1122,11 +1184,13 @@ impl Vcpu for KvmVcpu { } fn handle_wrmsr(&self) { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) }; // Verify that the handler is called in the right context. assert!(run.exit_reason == KVM_EXIT_X86_WRMSR); + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let msr = unsafe { &mut run.__bindgen_anon_1.msr }; @@ -1143,10 +1207,14 @@ impl KvmVcpu { /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone /// to run crosvm on s390. 
pub fn get_mp_state(&self) -> Result { - // Safe because we know that our file is a VCPU fd, we know the kernel will only write the - // correct amount of memory to our pointer, and we verify the return result. + // SAFETY: trivially safe let mut state: kvm_mp_state = unsafe { std::mem::zeroed() }; - let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE(), &mut state) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // correct amount of memory to our pointer, and we verify the return result. + unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE(), &mut state) } + }; if ret < 0 { return errno_result(); } @@ -1161,9 +1229,10 @@ impl KvmVcpu { /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone /// to run crosvm on s390. pub fn set_mp_state(&self, state: &kvm_mp_state) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the kvm_mp_state struct. - ioctl_with_ref(self, KVM_SET_MP_STATE(), state) + unsafe { ioctl_with_ref(self, KVM_SET_MP_STATE(), state) } }; if ret < 0 { return errno_result(); diff --git a/hypervisor/src/kvm/x86_64.rs b/hypervisor/src/kvm/x86_64.rs index bfbd156344..f26d1d4da8 100644 --- a/hypervisor/src/kvm/x86_64.rs +++ b/hypervisor/src/kvm/x86_64.rs @@ -124,11 +124,12 @@ pub fn get_cpuid_with_initial_capacity( loop { let mut kvm_cpuid = KvmCpuId::new(entries); - let ret = unsafe { + let ret = { + // SAFETY: // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the // memory allocated for the struct. The limit is read from nent within KvmCpuId, // which is set to the allocated size above. 
- ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) + unsafe { ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) } }; if ret < 0 { let err = Error::last(); @@ -187,11 +188,12 @@ impl HypervisorX86_64 for Kvm { let mut msr_list = vec_with_array_field::(MAX_KVM_MSR_ENTRIES); msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32; - let ret = unsafe { + let ret = { + // SAFETY: // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory // allocated for the struct. The limit is read from nmsrs, which is set to the allocated // size (MAX_KVM_MSR_ENTRIES) above. - ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) + unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) } }; if ret < 0 { return errno_result(); @@ -199,6 +201,7 @@ impl HypervisorX86_64 for Kvm { let mut nmsrs = msr_list[0].nmsrs; + // SAFETY: // Mapping the unsized array to a slice is unsafe because the length isn't known. Using // the length we originally allocated with eliminates the possibility of overflow. let indices: &[u32] = unsafe { @@ -235,10 +238,12 @@ impl KvmVm { /// Arch-specific implementation of `Vm::get_pvclock`. pub fn get_pvclock_arch(&self) -> Result { - // Safe because we know that our file is a VM fd, we know the kernel will only write correct - // amount of memory to our pointer, and we verify the return result. let mut clock_data: kvm_clock_data = Default::default(); - let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) }; + let ret = + // SAFETY: + // Safe because we know that our file is a VM fd, we know the kernel will only write correct + // amount of memory to our pointer, and we verify the return result. + unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) }; if ret == 0 { Ok(ClockState::from(&clock_data)) } else { @@ -249,6 +254,7 @@ impl KvmVm { /// Arch-specific implementation of `Vm::set_pvclock`. 
pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> { let clock_data = kvm_clock_data::from(state); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read correct // amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), &clock_data) }; @@ -267,17 +273,19 @@ impl KvmVm { chip_id: id as u32, ..Default::default() }; - let ret = unsafe { + let ret = { + // SAFETY: // Safe because we know our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) + unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) } }; if ret == 0 { - Ok(unsafe { + Ok( + // SAFETY: // Safe as we know that we are retrieving data related to the // PIC (primary or secondary) and not IOAPIC. - irqchip_state.chip.pic - }) + unsafe { irqchip_state.chip.pic }, + ) } else { errno_result() } @@ -292,6 +300,7 @@ impl KvmVm { ..Default::default() }; irqchip_state.chip.pic = *state; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; @@ -315,17 +324,19 @@ impl KvmVm { chip_id: 2, ..Default::default() }; - let ret = unsafe { + let ret = { + // SAFETY: // Safe because we know our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) + unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) } }; if ret == 0 { - Ok(unsafe { + Ok( + // SAFETY: // Safe as we know that we are retrieving data related to the // IOAPIC and not PIC. 
- irqchip_state.chip.ioapic - }) + unsafe { irqchip_state.chip.ioapic }, + ) } else { errno_result() } @@ -340,6 +351,7 @@ impl KvmVm { ..Default::default() }; irqchip_state.chip.ioapic = *state; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; @@ -355,6 +367,7 @@ impl KvmVm { /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. pub fn create_pit(&self) -> Result<()> { let pit_config = kvm_pit_config::default(); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) }; @@ -369,9 +382,10 @@ impl KvmVm { /// /// Note that this call can only succeed after a call to `Vm::create_pit`. pub fn get_pit_state(&self) -> Result { + let mut pit_state = Default::default(); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - let mut pit_state = Default::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) }; if ret == 0 { Ok(pit_state) @@ -384,6 +398,7 @@ impl KvmVm { /// /// Note that this call can only succeed after a call to `Vm::create_pit`. pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. 
let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) }; @@ -404,6 +419,7 @@ impl KvmVm { | KVM_MSR_EXIT_REASON_INVAL | KVM_MSR_EXIT_REASON_FILTER) as u64; + // SAFETY: // Safe because we know that our file is a VM fd, we know that the // kernel will only read correct amount of memory from our pointer, and // we verify the return result. @@ -423,6 +439,7 @@ impl KvmVm { }; cap.args[0] = allow_read as u64; + // SAFETY: // Safe because we know that our file is a VM fd, we know that the // kernel will only read correct amount of memory from our pointer, and // we verify the return result. @@ -486,6 +503,7 @@ impl KvmVm { let mut ret = 0; if count > 0 { + // SAFETY: // Safe because we know that our file is a VM fd, we know that the // kernel will only read correct amount of memory from our pointer, and // we verify the return result. @@ -506,6 +524,7 @@ impl KvmVm { ..Default::default() }; cap.args[0] = ioapic_pins as u64; + // SAFETY: // safe becuase we allocated the struct and we know the kernel will read // exactly the size of the struct let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), &cap) }; @@ -532,6 +551,7 @@ impl VmX86_64 for KvmVm { /// /// See the documentation on the KVM_SET_TSS_ADDR ioctl. fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr.offset()) }; if ret == 0 { @@ -545,6 +565,7 @@ impl VmX86_64 for KvmVm { /// /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl. fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. 
let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &addr.offset()) }; if ret == 0 { @@ -566,6 +587,7 @@ impl KvmVcpu { impl VcpuX86_64 for KvmVcpu { #[allow(clippy::cast_ptr_alignment)] fn set_interrupt_window_requested(&self, requested: bool) { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. @@ -575,6 +597,7 @@ impl VcpuX86_64 for KvmVcpu { #[allow(clippy::cast_ptr_alignment)] fn ready_for_interrupt(&self) -> bool { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. @@ -588,6 +611,7 @@ impl VcpuX86_64 for KvmVcpu { /// ChromeOS doesn't support PPC or MIPS. fn interrupt(&self, irq: u32) -> Result<()> { let interrupt = kvm_interrupt { irq }; + // SAFETY: // safe becuase we allocated the struct and we know the kernel will read // exactly the size of the struct let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT(), &interrupt) }; @@ -599,6 +623,7 @@ impl VcpuX86_64 for KvmVcpu { } fn inject_nmi(&self) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VCPU fd. let ret = unsafe { ioctl(self, KVM_NMI()) }; if ret == 0 { @@ -609,10 +634,13 @@ impl VcpuX86_64 for KvmVcpu { } fn get_regs(&self) -> Result { - // Safe because we know that our file is a VCPU fd, we know the kernel will only read the - // correct amount of memory from our pointer, and we verify the return result. 
let mut regs: kvm_regs = Default::default(); - let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. + unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) } + }; if ret == 0 { Ok(Regs::from(®s)) } else { @@ -622,9 +650,12 @@ impl VcpuX86_64 for KvmVcpu { fn set_regs(&self, regs: &Regs) -> Result<()> { let regs = kvm_regs::from(regs); - // Safe because we know that our file is a VCPU fd, we know the kernel will only read the - // correct amount of memory from our pointer, and we verify the return result. - let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), ®s) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. + unsafe { ioctl_with_ref(self, KVM_SET_REGS(), ®s) } + }; if ret == 0 { Ok(()) } else { @@ -633,10 +664,13 @@ impl VcpuX86_64 for KvmVcpu { } fn get_sregs(&self) -> Result { - // Safe because we know that our file is a VCPU fd, we know the kernel will only write the - // correct amount of memory to our pointer, and we verify the return result. let mut regs: kvm_sregs = Default::default(); - let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // correct amount of memory to our pointer, and we verify the return result. 
+ unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) } + }; if ret == 0 { Ok(Sregs::from(®s)) } else { @@ -647,9 +681,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_sregs(&self, sregs: &Sregs) -> Result<()> { // Get the current `kvm_sregs` so we can use its `apic_base` and `interrupt_bitmap`, which // are not present in `Sregs`. + let mut kvm_sregs: kvm_sregs = Default::default(); + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut kvm_sregs: kvm_sregs = Default::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut kvm_sregs) }; if ret != 0 { return errno_result(); @@ -672,6 +707,7 @@ impl VcpuX86_64 for KvmVcpu { kvm_sregs.cr8 = sregs.cr8; kvm_sregs.efer = sregs.efer; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), &kvm_sregs) }; @@ -683,9 +719,10 @@ impl VcpuX86_64 for KvmVcpu { } fn get_fpu(&self) -> Result { + let mut fpu: kvm_fpu = Default::default(); + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut fpu: kvm_fpu = Default::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut fpu) }; if ret == 0 { Ok(Fpu::from(&fpu)) @@ -696,9 +733,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_fpu(&self, fpu: &Fpu) -> Result<()> { let fpu = kvm_fpu::from(fpu); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_fpu struct. 
- ioctl_with_ref(self, KVM_SET_FPU(), &fpu) + unsafe { ioctl_with_ref(self, KVM_SET_FPU(), &fpu) } }; if ret == 0 { Ok(()) @@ -709,11 +747,12 @@ impl VcpuX86_64 for KvmVcpu { /// If the VM reports using XSave2, the function will call XSave2. fn get_xsave(&self) -> Result { - // Safe because we know that our file is a VM fd, we know that the - // kernel will only read correct amount of memory from our pointer, and - // we verify the return result. - // Get the size of Xsave in bytes. Values are of type u32. let size = + // SAFETY: + // Safe because we know that our file is a VM fd, we know that the + // kernel will only read correct amount of memory from our pointer, and + // we verify the return result. + // Get the size of Xsave in bytes. Values are of type u32. unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION(), KVM_CAP_XSAVE2 as u64) }; if size < 0 { return errno_result(); @@ -725,6 +764,7 @@ impl VcpuX86_64 for KvmVcpu { }; let mut xsave = Xsave::new(size as usize); + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) }; @@ -736,13 +776,15 @@ impl VcpuX86_64 for KvmVcpu { } fn set_xsave(&self, xsave: &Xsave) -> Result<()> { - // Safe because we know that our file is a VM fd, we know that the - // kernel will only read correct amount of memory from our pointer, and - // get size from KVM_CAP_XSAVE2. Will return at least 4096 as a value if XSAVE2 is not - // supported or if no extensions are enabled. Otherwise it will return a value higher than - // 4096. - let size = - unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION(), KVM_CAP_XSAVE2 as u64) }; + let size = { + // SAFETY: + // Safe because we know that our file is a VM fd, we know that the + // kernel will only read correct amount of memory from our pointer, and + // get size from KVM_CAP_XSAVE2. 
Will return at least 4096 as a value if XSAVE2 is not + // supported or if no extensions are enabled. Otherwise it will return a value higher than + // 4096. + unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION(), KVM_CAP_XSAVE2 as u64) } + }; if size < 0 { return errno_result(); } @@ -752,6 +794,7 @@ impl VcpuX86_64 for KvmVcpu { return Err(Error::new(EIO)); } + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. // Because of the len check above, and because the layout of `struct kvm_xsave` is @@ -766,7 +809,12 @@ impl VcpuX86_64 for KvmVcpu { fn get_interrupt_state(&self) -> Result { let mut vcpu_evts: kvm_vcpu_events = Default::default(); - let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut vcpu_evts) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // correct amount of memory to our pointer, and we verify the return result. + unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut vcpu_evts) } + }; if ret == 0 { Ok( serde_json::to_value(VcpuEvents::from(&vcpu_evts)).map_err(|e| { @@ -785,7 +833,12 @@ impl VcpuX86_64 for KvmVcpu { error!("failed to deserialize vcpu_events: {:?}", e); Error::new(EIO) })?); - let ret = unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), &vcpu_events) }; + let ret = { + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. 
+ unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), &vcpu_events) } + }; if ret == 0 { Ok(()) } else { @@ -794,9 +847,10 @@ impl VcpuX86_64 for KvmVcpu { } fn get_debugregs(&self) -> Result { + let mut regs: kvm_debugregs = Default::default(); + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut regs: kvm_debugregs = Default::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) }; if ret == 0 { Ok(DebugRegs::from(®s)) @@ -807,9 +861,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> { let dregs = kvm_debugregs::from(dregs); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_debugregs struct. - ioctl_with_ref(self, KVM_SET_DEBUGREGS(), &dregs) + unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS(), &dregs) } }; if ret == 0 { Ok(()) @@ -819,9 +874,10 @@ impl VcpuX86_64 for KvmVcpu { } fn get_xcrs(&self) -> Result> { + let mut regs: kvm_xcrs = Default::default(); + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut regs: kvm_xcrs = Default::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) }; if ret == 0 { Ok(from_kvm_xcrs(®s)) @@ -832,9 +888,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_xcrs(&self, xcrs: &[Register]) -> Result<()> { let xcrs = to_kvm_xcrs(xcrs); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_xcrs struct. 
- ioctl_with_ref(self, KVM_SET_XCRS(), &xcrs) + unsafe { ioctl_with_ref(self, KVM_SET_XCRS(), &xcrs) } }; if ret == 0 { Ok(()) @@ -845,14 +902,16 @@ impl VcpuX86_64 for KvmVcpu { fn get_msrs(&self, vec: &mut Vec) -> Result<()> { let msrs = to_kvm_msrs(vec); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read or write past the end of the kvm_msrs struct. - ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) + unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) } }; // KVM_GET_MSRS actually returns the number of msr entries written. if ret < 0 { return errno_result(); } + // SAFETY: // Safe because we trust the kernel to return the correct array length on success. let entries = unsafe { let count = ret as usize; @@ -889,9 +948,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_msrs(&self, vec: &[Register]) -> Result<()> { let msrs = to_kvm_msrs(vec); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_msrs struct. - ioctl_with_ref(self, KVM_SET_MSRS(), &msrs[0]) + unsafe { ioctl_with_ref(self, KVM_SET_MSRS(), &msrs[0]) } }; // KVM_SET_MSRS actually returns the number of msr entries written. if ret < 0 { @@ -917,9 +977,10 @@ impl VcpuX86_64 for KvmVcpu { fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> { let cpuid = KvmCpuId::from(cpuid); - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_msrs struct. - ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) + unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) } }; if ret == 0 { Ok(()) @@ -961,9 +1022,10 @@ impl VcpuX86_64 for KvmVcpu { dbg.arch.debugreg[7] |= 2 << (i * 2); } - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_guest_debug struct. 
- ioctl_with_ref(self, KVM_SET_GUEST_DEBUG(), &dbg) + unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG(), &dbg) } }; if ret == 0 { Ok(()) @@ -1012,10 +1074,11 @@ impl KvmVcpu { pub fn get_lapic(&self) -> Result { let mut klapic: kvm_lapic_state = Default::default(); - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is unsafe unless you trust the kernel not to write past the end of the // local_apic struct. - ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) + unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) } }; if ret < 0 { return errno_result(); @@ -1027,9 +1090,10 @@ impl KvmVcpu { /// /// See the documentation for KVM_SET_LAPIC. pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the klapic struct. - ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) + unsafe { ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) } }; if ret < 0 { return errno_result(); @@ -1067,6 +1131,9 @@ impl KvmVcpu { /// See the documentation for KVM_GET_SREGS. pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> { let mut regs: kvm_sregs = Default::default(); + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // correct amount of memory to our pointer, and we verify the return result. let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }; if ret >= 0 { Ok(regs.interrupt_bitmap) @@ -1083,9 +1150,15 @@ impl KvmVcpu { // in Sregs being modified from the Vcpu initialization thread and the Irq restoring // thread. let mut regs: kvm_sregs = Default::default(); + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // correct amount of memory to our pointer, and we verify the return result. 
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }; if ret >= 0 { regs.interrupt_bitmap = interrupt_bitmap; + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), ®s) }; if ret >= 0 { Ok(()) @@ -1235,6 +1308,7 @@ impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 { impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry { fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self { let mut entry = IoapicRedirectionTableEntry::default(); + // SAFETY: // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys // table entry layout entry.set(0, 64, unsafe { item.bits }); @@ -1733,9 +1807,10 @@ fn to_kvm_msrs(vec: &[Register]) -> Vec { .collect(); let mut msrs = vec_with_array_field::(vec.len()); + // SAFETY: + // Mapping the unsized array to a slice is unsafe because the length isn't known. + // Providing the length used to create the struct guarantees the entire slice is valid. unsafe { - // Mapping the unsized array to a slice is unsafe because the length isn't known. - // Providing the length used to create the struct guarantees the entire slice is valid. 
msrs[0] .entries .as_mut_slice(vec.len()) diff --git a/hypervisor/src/x86_64.rs b/hypervisor/src/x86_64.rs index f4d2254e54..adb667f32e 100644 --- a/hypervisor/src/x86_64.rs +++ b/hypervisor/src/x86_64.rs @@ -212,12 +212,14 @@ pub(crate) fn get_tsc_offset_from_msr(vcpu: &impl VcpuX86_64) -> Result { value: 0, }]; + // SAFETY: // Safe because _rdtsc takes no arguments let host_before_tsc = unsafe { _rdtsc() }; // get guest TSC value from our hypervisor vcpu.get_msrs(&mut regs)?; + // SAFETY: // Safe because _rdtsc takes no arguments let host_after_tsc = unsafe { _rdtsc() }; @@ -269,8 +271,10 @@ pub(crate) fn set_tsc_value_via_msr(vcpu: &impl VcpuX86_64, value: u64) -> Resul /// Gets host cpu max physical address bits. #[cfg(any(unix, feature = "haxm", feature = "whpx"))] pub(crate) fn host_phys_addr_bits() -> u8 { + // SAFETY: trivially safe let highest_ext_function = unsafe { __cpuid(0x80000000) }; if highest_ext_function.eax >= 0x80000008 { + // SAFETY: trivially safe let addr_size = unsafe { __cpuid(0x80000008) }; // Low 8 bits of 0x80000008 leaf: host physical address size in bits. addr_size.eax as u8 @@ -485,6 +489,7 @@ pub struct IoapicState { impl Default for IoapicState { fn default() -> IoapicState { + // SAFETY: trivially safe unsafe { std::mem::zeroed() } } } diff --git a/hypervisor/tests/kvm/main.rs b/hypervisor/tests/kvm/main.rs index 9a02796945..125c4253a8 100644 --- a/hypervisor/tests/kvm/main.rs +++ b/hypervisor/tests/kvm/main.rs @@ -251,8 +251,15 @@ fn irqfd_resample() { vm.register_irqfd(4, &evtfd1, Some(&evtfd2)).unwrap(); vm.unregister_irqfd(4, &evtfd1).unwrap(); // Ensures the ioctl is actually reading the resamplefd. 
- vm.register_irqfd(4, &evtfd1, Some(unsafe { &Event::from_raw_descriptor(-1) })) - .unwrap_err(); + vm.register_irqfd( + 4, + &evtfd1, + Some( + // SAFETY: trivially safe + unsafe { &Event::from_raw_descriptor(-1) }, + ), + ) + .unwrap_err(); } #[test] diff --git a/hypervisor/tests/kvm/x86_64.rs b/hypervisor/tests/kvm/x86_64.rs index b28cb5005d..c83d5de172 100644 --- a/hypervisor/tests/kvm/x86_64.rs +++ b/hypervisor/tests/kvm/x86_64.rs @@ -162,7 +162,13 @@ fn ioapic_state() { assert_eq!(kvm_state.pad, 0); // check first 24 entries for i in 0..24 { - assert_eq!(unsafe { kvm_state.redirtbl[i].bits }, bit_repr); + assert_eq!( + { + // SAFETY: trivially safe + unsafe { kvm_state.redirtbl[i].bits } + }, + bit_repr + ); } // compare with a conversion back @@ -317,6 +323,7 @@ fn enable_feature() { let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap(); vm.create_irq_chip().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); + // SAFETY: trivially safe unsafe { vcpu.enable_raw_capability(kvm_sys::KVM_CAP_HYPERV_SYNIC, &[0; 4]) }.unwrap(); } diff --git a/hypervisor/tests/tsc_offsets.rs b/hypervisor/tests/tsc_offsets.rs index 1f4e9d333c..ef3e70a605 100644 --- a/hypervisor/tests/tsc_offsets.rs +++ b/hypervisor/tests/tsc_offsets.rs @@ -113,6 +113,7 @@ where vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed"); // basic case, we set MSR to 0 + // SAFETY: trivially safe let tsc_now = unsafe { _rdtsc() }; test_tsc_offset_run( &mut vcpu, @@ -124,9 +125,11 @@ where 0, ); // set offset to 0 + // SAFETY: trivially safe let tsc_now = unsafe { _rdtsc() }; test_tsc_offset_run(&mut vcpu, &mem_clone, load_addr, None, Some(0), 0, tsc_now); // some moderately sized offset + // SAFETY: trivially safe let tsc_now = unsafe { _rdtsc() }; let ten_seconds = 2_500_000_000 * 10; test_tsc_offset_run( @@ -139,6 +142,7 @@ where tsc_now + ten_seconds, ); // set offset to u64::MAX - tsc_now + 1 + // SAFETY: trivially safe let tsc_now = unsafe { _rdtsc() }; test_tsc_offset_run( &mut vcpu, diff --git 
a/io_uring/src/bindings.rs b/io_uring/src/bindings.rs index f9c714a5ca..382bfe461a 100644 --- a/io_uring/src/bindings.rs +++ b/io_uring/src/bindings.rs @@ -2,6 +2,7 @@ #![allow(clippy::missing_safety_doc)] #![allow(clippy::upper_case_acronyms)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] diff --git a/io_uring/src/uring.rs b/io_uring/src/uring.rs index 2298d873d5..ef024748b7 100644 --- a/io_uring/src/uring.rs +++ b/io_uring/src/uring.rs @@ -293,6 +293,7 @@ impl URingContext { ring_params.flags |= IORING_SETUP_R_DISABLED; } + // SAFETY: // The below unsafe block isolates the creation of the URingContext. Each step on it's own // is unsafe. Using the uring FD for the mapping and the offsets returned by the kernel for // base addresses maintains safety guarantees assuming the kernel API guarantees are @@ -634,10 +635,10 @@ impl URingContext { } else { 0 }; - let res = unsafe { + let res = + // SAFETY: // Safe because the only memory modified is in the completion queue. - io_uring_enter(self.ring_file.as_raw_fd(), added as u64, wait_nr, flags) - }; + unsafe { io_uring_enter(self.ring_file.as_raw_fd(), added as u64, wait_nr, flags) }; // An EINTR means we did successfully submit the events. if res.is_ok() || res == Err(libc::EINTR) { @@ -653,8 +654,9 @@ impl URingContext { // EINTR means we were interrupted while waiting, so start waiting again. Err(libc::EBUSY) | Err(libc::EINTR) if wait_nr != 0 => { loop { - // Safe because the only memory modified is in the completion queue. let res = + // SAFETY: + // Safe because the only memory modified is in the completion queue. 
unsafe { io_uring_enter(self.ring_file.as_raw_fd(), 0, wait_nr, flags) }; if res != Err(libc::EINTR) { return res.map_err(Error::RingEnter); @@ -716,11 +718,10 @@ impl SubmitQueueEntries { if index >= self.len { return None; } - let mut_ref = unsafe { - // Safe because the mut borrow of self resticts to one mutable reference at a time and - // we trust that the kernel has returned enough memory in io_uring_setup and mmap. - &mut *(self.mmap.as_ptr() as *mut io_uring_sqe).add(index) - }; + // SAFETY: + // Safe because the mut borrow of self resticts to one mutable reference at a time and + // we trust that the kernel has returned enough memory in io_uring_setup and mmap. + let mut_ref = unsafe { &mut *(self.mmap.as_ptr() as *mut io_uring_sqe).add(index) }; // Clear any state. *mut_ref = io_uring_sqe::default(); Some(mut_ref) @@ -757,6 +758,7 @@ impl SubmitQueueState { // Sets the kernel's array entry at the given `index` to `value`. fn set_array_entry(&self, index: usize, value: u32) { + // SAFETY: // Safe because self being constructed from the correct mmap guaratees that the memory is // valid to written. unsafe { @@ -803,9 +805,10 @@ impl CompleteQueueState { } fn get_cqe(&self, head: u32) -> &io_uring_cqe { + // SAFETY: + // Safe because we trust that the kernel has returned enough memory in io_uring_setup + // and mmap and index is checked within range by the ring_mask. unsafe { - // Safe because we trust that the kernel has returned enough memory in io_uring_setup - // and mmap and index is checked within range by the ring_mask. let cqes = (self.mmap.as_ptr() as *const u8).add(self.cqes_offset as usize) as *const io_uring_cqe; @@ -870,14 +873,17 @@ struct QueuePointers { tail: *const AtomicU32, } +// SAFETY: // Rust pointers don't implement Send or Sync but in this case both fields are atomics and so it's // safe to send the pointers between threads or access them concurrently from multiple threads. 
unsafe impl Send for QueuePointers {} +// SAFETY: See safety comments for impl Send unsafe impl Sync for QueuePointers {} impl QueuePointers { // Loads the tail pointer atomically with the given ordering. fn tail(&self, ordering: Ordering) -> u32 { + // SAFETY: // Safe because self being constructed from the correct mmap guaratees that the memory is // valid to read. unsafe { (*self.tail).load(ordering) } @@ -887,6 +893,7 @@ impl QueuePointers { // processing entries that have been added up until the given tail pointer. // Always stores with release ordering as that is the only valid way to use the pointer. fn set_tail(&self, next_tail: u32) { + // SAFETY: // Safe because self being constructed from the correct mmap guaratees that the memory is // valid to read and it's used as an atomic to cover mutability concerns. unsafe { (*self.tail).store(next_tail, Ordering::Release) } @@ -894,6 +901,7 @@ impl QueuePointers { // Loads the head pointer atomically with the given ordering. fn head(&self, ordering: Ordering) -> u32 { + // SAFETY: // Safe because self being constructed from the correct mmap guaratees that the memory is // valid to read. unsafe { (*self.head).load(ordering) } @@ -903,6 +911,7 @@ impl QueuePointers { // processing entries that have been added up until the given head pointer. // Always stores with release ordering as that is the only valid way to use the pointer. fn set_head(&self, next_head: u32) { + // SAFETY: // Safe because self being constructed from the correct mmap guaratees that the memory is // valid to read and it's used as an atomic to cover mutability concerns. 
unsafe { (*self.head).store(next_head, Ordering::Release) } diff --git a/io_uring/tests/uring.rs b/io_uring/tests/uring.rs index 0d0082847c..648ec4bfce 100644 --- a/io_uring/tests/uring.rs +++ b/io_uring/tests/uring.rs @@ -47,6 +47,8 @@ fn append_file_name(path: &Path, name: &str) -> PathBuf { joined } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe fn add_one_read( uring: &URingContext, ptr: *mut u8, @@ -63,6 +65,8 @@ unsafe fn add_one_read( ) } +// TODO(b/315998194): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe fn add_one_write( uring: &URingContext, ptr: *const u8, @@ -99,6 +103,8 @@ fn read_parallel() { // double the quue depth of buffers. for i in 0..QUEUE_SIZE * 64 { let index = i as u64; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { let offset = (i % QUEUE_SIZE) * BUF_SIZE; match add_one_read( @@ -131,14 +137,16 @@ fn read_readv() { // double the quue depth of buffers. for i in 0..queue_size * 2 { let index = i as u64; + // SAFETY: + // safe to transmut from IoSlice to iovec. let io_vecs = unsafe { - //safe to transmut from IoSlice to iovec. vec![IoSliceMut::new(&mut buf)] .into_iter() .map(|slice| std::mem::transmute::(slice)) }; + // SAFETY: + // Safe because the `wait` call waits until the kernel is done with `buf`. let (user_data_ret, res) = unsafe { - // Safe because the `wait` call waits until the kernel is done with `buf`. uring .add_readv_iter(io_vecs, f.as_raw_fd(), Some((index % 2) * 0x1000), index) .unwrap(); @@ -158,8 +166,9 @@ fn readv_vec() { let mut buf = [0u8; BUF_SIZE]; let mut buf2 = [0u8; BUF_SIZE]; let mut buf3 = [0u8; BUF_SIZE]; + // SAFETY: + //safe to transmut from IoSlice to iovec. let io_vecs = unsafe { - //safe to transmut from IoSlice to iovec. 
vec![ IoSliceMut::new(&mut buf), IoSliceMut::new(&mut buf2), @@ -171,8 +180,9 @@ fn readv_vec() { }; let total_len = io_vecs.iter().fold(0, |a, iovec| a + iovec.iov_len); let f = create_test_file(total_len as u64 * 2); + // SAFETY: + // Safe because the `wait` call waits until the kernel is done with `buf`. let (user_data_ret, res) = unsafe { - // Safe because the `wait` call waits until the kernel is done with `buf`. uring .add_readv_iter(io_vecs.into_iter(), f.as_raw_fd(), Some(0), 55) .unwrap(); @@ -190,8 +200,9 @@ fn write_one_block() { f.write_all(&buf).unwrap(); f.write_all(&buf).unwrap(); + // SAFETY: + // Safe because the `wait` call waits until the kernel is done mutating `buf`. unsafe { - // Safe because the `wait` call waits until the kernel is done mutating `buf`. add_one_write( &uring, buf.as_mut_ptr(), @@ -222,8 +233,9 @@ fn write_one_submit_poll() { assert!(events.iter().next().is_none()); } + // SAFETY: + // Safe because the `wait` call waits until the kernel is done mutating `buf`. unsafe { - // Safe because the `wait` call waits until the kernel is done mutating `buf`. add_one_write( &uring, buf.as_mut_ptr(), @@ -255,8 +267,9 @@ fn writev_vec() { let buf = [0xaau8; BUF_SIZE]; let buf2 = [0xffu8; BUF_SIZE]; let buf3 = [0x55u8; BUF_SIZE]; + // SAFETY: + //safe to transmut from IoSlice to iovec. let io_vecs = unsafe { - //safe to transmut from IoSlice to iovec. vec![IoSlice::new(&buf), IoSlice::new(&buf2), IoSlice::new(&buf3)] .into_iter() .map(|slice| std::mem::transmute::(slice)) @@ -264,8 +277,9 @@ fn writev_vec() { }; let total_len = io_vecs.iter().fold(0, |a, iovec| a + iovec.iov_len); let mut f = create_test_file(total_len as u64 * 2); + // SAFETY: + // Safe because the `wait` call waits until the kernel is done with `buf`. let (user_data_ret, res) = unsafe { - // Safe because the `wait` call waits until the kernel is done with `buf`. 
uring .add_writev_iter(io_vecs.into_iter(), f.as_raw_fd(), Some(OFFSET), 55) .unwrap(); @@ -330,6 +344,8 @@ fn fallocate_fsync() { // Add a few writes and then fsync let buf = [0u8; 4096]; let mut pending = std::collections::BTreeSet::new(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { add_one_write(&uring, buf.as_ptr(), buf.len(), f.as_raw_fd(), Some(0), 67).unwrap(); pending.insert(67u64); @@ -453,6 +469,8 @@ fn wake_with_nop() { let uring2 = uring.clone(); let wait_thread = thread::spawn(move || { let mut buf = [0u8; BUF_DATA.len()]; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { add_one_read( &uring2, @@ -756,6 +774,8 @@ fn restrict_ops() { // add_read, which submits Readv, should succeed + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { add_one_read( &uring, @@ -777,6 +797,8 @@ fn restrict_ops() { let mut buf: [u8; 4] = TEST_DATA.to_owned(); // fake data, which should not be written let mut f = create_test_file(4); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { add_one_write( &uring, diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs index e71373933e..f0d75be4af 100644 --- a/kvm/src/lib.rs +++ b/kvm/src/lib.rs @@ -148,19 +148,24 @@ impl Kvm { /// Opens a KVM device at `device_path` and returns a Kvm object on success. pub fn new_with_path(device_path: &Path) -> Result { - // Open calls are safe because we give a nul-terminated string and verify the result. let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap(); + // SAFETY: + // Open calls are safe because we give a nul-terminated string and verify the result. let ret = unsafe { open64(c_path.as_ptr(), O_RDWR | O_CLOEXEC) }; if ret < 0 { return errno_result(); } - // Safe because we verify that ret is valid and we own the fd. 
Ok(Kvm { - kvm: unsafe { File::from_raw_descriptor(ret) }, + kvm: { + // SAFETY: + // Safe because we verify that ret is valid and we own the fd. + unsafe { File::from_raw_descriptor(ret) } + }, }) } fn check_extension_int(&self, c: Cap) -> i32 { + // SAFETY: // Safe because we know that our file is a KVM fd and that the extension is one of the ones // defined by kernel. unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) } @@ -173,6 +178,7 @@ impl Kvm { /// Gets the size of the mmap required to use vcpu's `kvm_run` structure. pub fn get_vcpu_mmap_size(&self) -> Result { + // SAFETY: // Safe because we know that our file is a KVM fd and we verify the return result. let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE()) }; if res > 0 { @@ -187,12 +193,11 @@ impl Kvm { const MAX_KVM_CPUID_ENTRIES: usize = 256; let mut cpuid = CpuId::new(MAX_KVM_CPUID_ENTRIES); - let ret = unsafe { - // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory - // allocated for the struct. The limit is read from nent, which is set to the allocated - // size(MAX_KVM_CPUID_ENTRIES) above. - ioctl_with_mut_ptr(self, kind, cpuid.as_mut_ptr()) - }; + // SAFETY: + // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory + // allocated for the struct. The limit is read from nent, which is set to the allocated + // size(MAX_KVM_CPUID_ENTRIES) above. + let ret = unsafe { ioctl_with_mut_ptr(self, kind, cpuid.as_mut_ptr()) }; if ret < 0 { return errno_result(); } @@ -222,18 +227,18 @@ impl Kvm { let mut msr_list = vec_with_array_field::(MAX_KVM_MSR_ENTRIES); msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32; - let ret = unsafe { - // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory - // allocated for the struct. The limit is read from nmsrs, which is set to the allocated - // size (MAX_KVM_MSR_ENTRIES) above. 
- ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) - }; + // SAFETY: + // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory + // allocated for the struct. The limit is read from nmsrs, which is set to the allocated + // size (MAX_KVM_MSR_ENTRIES) above. + let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) }; if ret < 0 { return errno_result(); } let mut nmsrs = msr_list[0].nmsrs; + // SAFETY: // Mapping the unsized array to a slice is unsafe because the length isn't known. Using // the length we originally allocated with eliminates the possibility of overflow. let indices: &[u32] = unsafe { @@ -259,6 +264,7 @@ impl Kvm { // the kernel support. #[allow(clippy::useless_conversion)] pub fn get_vm_type(&self) -> c_ulong { + // SAFETY: // Safe because we know self is a real kvm fd match unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), KVM_CAP_ARM_VM_IPA_SIZE.into()) } { @@ -342,15 +348,18 @@ pub struct Vm { impl Vm { /// Constructs a new `Vm` using the given `Kvm` instance. pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result { + // SAFETY: // Safe because we know kvm is a real kvm fd as this module is the only one that can make // Kvm objects. let ret = unsafe { ioctl_with_val(kvm, KVM_CREATE_VM(), kvm.get_vm_type()) }; if ret >= 0 { + // SAFETY: // Safe because we verify the value of ret and we are the owners of the fd. let vm_file = unsafe { File::from_raw_descriptor(ret) }; for region in guest_mem.regions() { + // SAFETY: + // Safe because the guest regions are guaranteed not to overlap. unsafe { - // Safe because the guest regions are guaranteed not to overlap. set_user_memory_region( &vm_file, region.index as u32, @@ -380,6 +389,7 @@ impl Vm { /// the particular `Vm` existence. This method is encouraged by the kernel because it more /// accurately reflects the usable capabilities. 
pub fn check_extension(&self, c: Cap) -> bool { + // SAFETY: // Safe because we know that our file is a KVM fd and that the extension is one of the ones // defined by kernel. unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) == 1 } @@ -420,6 +430,7 @@ impl Vm { None => (regions.len() + self.guest_mem.num_regions() as usize) as u32, }; + // SAFETY: // Safe because we check that the given guest address is valid and has no overlaps. We also // know that the pointer and size are correct because the MemoryMapping interface ensures // this. We take ownership of the memory mapping so that it won't be unmapped until the slot @@ -452,6 +463,7 @@ impl Vm { if !regions.contains_key(&slot) { return Err(Error::new(ENOENT)); } + // SAFETY: // Safe because the slot is checked against the list of memory slots. unsafe { set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?; @@ -479,6 +491,7 @@ impl Vm { ..Default::default() }; dirty_log_kvm.__bindgen_anon_1.dirty_bitmap = dirty_log.as_ptr() as *mut c_void; + // SAFETY: // Safe because the `dirty_bitmap` pointer assigned above is guaranteed to be valid // (because it's from a slice) and we checked that it will be large enough to hold // the entire log. @@ -506,6 +519,7 @@ impl Vm { /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl. #[cfg(target_arch = "x86_64")] pub fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &addr.offset()) }; if ret == 0 { @@ -520,9 +534,11 @@ impl Vm { /// See the documentation on the KVM_GET_CLOCK ioctl. 
#[cfg(target_arch = "x86_64")] pub fn get_clock(&self) -> Result { + // SAFETY: trivially safe + let mut clock_data = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - let mut clock_data = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) }; if ret == 0 { Ok(clock_data) @@ -536,6 +552,7 @@ impl Vm { /// See the documentation on the KVM_SET_CLOCK ioctl. #[cfg(target_arch = "x86_64")] pub fn set_clock(&self, clock_data: &kvm_clock_data) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), clock_data) }; @@ -551,6 +568,7 @@ impl Vm { /// See the documentation on the KVM_CREATE_IRQCHIP ioctl. #[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))] pub fn create_irq_chip(&self) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) }; if ret == 0 { @@ -569,17 +587,17 @@ impl Vm { chip_id: id as u32, ..Default::default() }; - let ret = unsafe { - // Safe because we know our file is a VM fd, we know the kernel will only write - // correct amount of memory to our pointer, and we verify the return result. - ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) - }; + // SAFETY: + // Safe because we know our file is a VM fd, we know the kernel will only write + // correct amount of memory to our pointer, and we verify the return result. 
+ let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) }; if ret == 0 { - Ok(unsafe { + Ok( + // SAFETY: // Safe as we know that we are retrieving data related to the // PIC (primary or secondary) and not IOAPIC. - irqchip_state.chip.pic - }) + unsafe { irqchip_state.chip.pic }, + ) } else { errno_result() } @@ -595,6 +613,7 @@ impl Vm { ..Default::default() }; irqchip_state.chip.pic = *state; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; @@ -614,17 +633,20 @@ impl Vm { chip_id: 2, ..Default::default() }; - let ret = unsafe { + let ret = + // SAFETY: // Safe because we know our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) + unsafe { + ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) }; if ret == 0 { - Ok(unsafe { + Ok( + // SAFETY: // Safe as we know that we are retrieving data related to the // IOAPIC and not PIC. - irqchip_state.chip.ioapic - }) + unsafe { irqchip_state.chip.ioapic }, + ) } else { errno_result() } @@ -640,6 +662,7 @@ impl Vm { ..Default::default() }; irqchip_state.chip.ioapic = *state; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; @@ -657,6 +680,7 @@ impl Vm { irq_level.__bindgen_anon_1.irq = irq; irq_level.level = active.into(); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. 
let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) }; @@ -673,6 +697,7 @@ impl Vm { #[cfg(target_arch = "x86_64")] pub fn create_pit(&self) -> Result<()> { let pit_config = kvm_pit_config::default(); + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) }; @@ -688,9 +713,11 @@ impl Vm { /// Note that this call can only succeed after a call to `Vm::create_pit`. #[cfg(target_arch = "x86_64")] pub fn get_pit_state(&self) -> Result { + // SAFETY: trivially safe + let mut pit_state = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only write // correct amount of memory to our pointer, and we verify the return result. - let mut pit_state = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) }; if ret == 0 { Ok(pit_state) @@ -704,6 +731,7 @@ impl Vm { /// Note that this call can only succeed after a call to `Vm::create_pit`. #[cfg(target_arch = "x86_64")] pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) }; @@ -791,6 +819,7 @@ impl Vm { flags, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. 
let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) }; @@ -817,6 +846,7 @@ impl Vm { gsi, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) }; @@ -840,6 +870,7 @@ impl Vm { flags: KVM_IRQFD_FLAG_DEASSIGN, ..Default::default() }; + // SAFETY: // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) }; @@ -858,6 +889,7 @@ impl Vm { vec_with_array_field::(routes.len()); irq_routing[0].nr = routes.len() as u32; + // SAFETY: // Safe because we ensured there is enough space in irq_routing to hold the number of // route entries. let irq_routes = unsafe { irq_routing[0].entries.as_mut_slice(routes.len()) }; @@ -880,6 +912,8 @@ impl Vm { } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING(), &irq_routing[0]) }; if ret == 0 { Ok(()) @@ -1008,12 +1042,14 @@ impl Vcpu { pub fn new(id: c_ulong, kvm: &Kvm, vm: &Vm) -> Result { let run_mmap_size = kvm.get_vcpu_mmap_size()?; + // SAFETY: // Safe because we know that vm a VM fd and we verify the return result. let vcpu_fd = unsafe { ioctl_with_val(vm, KVM_CREATE_VCPU(), id) }; if vcpu_fd < 0 { return errno_result(); } + // SAFETY: // Wrap the vcpu now in case the following ? returns early. This is safe because we verified // the value of the fd and we own the fd. let vcpu = unsafe { File::from_raw_descriptor(vcpu_fd) }; @@ -1067,6 +1103,7 @@ impl Vcpu { /// `VcpuExit::MmioRead`, or 'VcpuExit::HypervHcall`. 
#[allow(clippy::cast_ptr_alignment)] pub fn set_data(&self, data: &[u8]) -> Result<()> { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. @@ -1074,6 +1111,7 @@ impl Vcpu { match run.exit_reason { KVM_EXIT_IO => { let run_start = run as *mut kvm_run as *mut u8; + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let io = unsafe { run.__bindgen_anon_1.io }; @@ -1084,6 +1122,7 @@ impl Vcpu { if data_size != data.len() { return Err(Error::new(EINVAL)); } + // SAFETY: // The data_offset is defined by the kernel to be some number of bytes into the // kvm_run structure, which we have fully mmap'd. unsafe { @@ -1093,6 +1132,7 @@ impl Vcpu { Ok(()) } KVM_EXIT_MMIO => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let mmio = unsafe { &mut run.__bindgen_anon_1.mmio }; @@ -1107,12 +1147,15 @@ impl Vcpu { Ok(()) } KVM_EXIT_HYPERV => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let hyperv = unsafe { &mut run.__bindgen_anon_1.hyperv }; if hyperv.type_ != KVM_EXIT_HYPERV_HCALL { return Err(Error::new(EINVAL)); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let hcall = unsafe { &mut hyperv.u.hcall }; match data.try_into() { Ok(data) => { @@ -1129,6 +1172,7 @@ impl Vcpu { /// Sets the bit that requests an immediate exit. #[allow(clippy::cast_ptr_alignment)] pub fn set_immediate_exit(&self, exit: bool) { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. The pointer is page aligned so casting to a different // type is well defined, hence the clippy allow attribute. 
@@ -1140,6 +1184,8 @@ impl Vcpu { pub fn set_local_immediate_exit(exit: bool) { VCPU_THREAD.with(|v| { if let Some(state) = &(*v.borrow()) { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { (*state.run).immediate_exit = exit.into(); }; @@ -1150,9 +1196,11 @@ impl Vcpu { /// Gets the VCPU registers. #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] pub fn get_regs(&self) -> Result { + // SAFETY: trivially safe + let mut regs = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. - let mut regs = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) }; if ret != 0 { return errno_result(); @@ -1163,6 +1211,7 @@ impl Vcpu { /// Sets the VCPU registers. #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] pub fn set_regs(&self, regs: &kvm_regs) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), regs) }; @@ -1175,9 +1224,11 @@ impl Vcpu { /// Gets the VCPU special registers. #[cfg(target_arch = "x86_64")] pub fn get_sregs(&self) -> Result { + // SAFETY: trivially safe + let mut regs = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut regs = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }; if ret != 0 { return errno_result(); @@ -1188,6 +1239,7 @@ impl Vcpu { /// Sets the VCPU special registers. 
#[cfg(target_arch = "x86_64")] pub fn set_sregs(&self, sregs: &kvm_sregs) -> Result<()> { + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), sregs) }; @@ -1200,9 +1252,11 @@ impl Vcpu { /// Gets the VCPU FPU registers. #[cfg(target_arch = "x86_64")] pub fn get_fpu(&self) -> Result { - // Safe because we know that our file is a VCPU fd, we know the kernel will only write the + // SAFETY: trivially safe // correct amount of memory to our pointer, and we verify the return result. let mut regs = unsafe { std::mem::zeroed() }; + // SAFETY: + // Safe because we know that our file is a VCPU fd, we know the kernel will only write the let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut regs) }; if ret != 0 { return errno_result(); @@ -1215,9 +1269,10 @@ impl Vcpu { /// See the documentation for KVM_SET_FPU. #[cfg(target_arch = "x86_64")] pub fn set_fpu(&self, fpu: &kvm_fpu) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_fpu struct. - ioctl_with_ref(self, KVM_SET_FPU(), fpu) + unsafe { ioctl_with_ref(self, KVM_SET_FPU(), fpu) } }; if ret < 0 { return errno_result(); @@ -1228,9 +1283,11 @@ impl Vcpu { /// Gets the VCPU debug registers. #[cfg(target_arch = "x86_64")] pub fn get_debugregs(&self) -> Result { + // SAFETY: trivially safe + let mut regs = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. 
- let mut regs = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) }; if ret != 0 { return errno_result(); @@ -1241,9 +1298,10 @@ impl Vcpu { /// Sets the VCPU debug registers #[cfg(target_arch = "x86_64")] pub fn set_debugregs(&self, dregs: &kvm_debugregs) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_fpu struct. - ioctl_with_ref(self, KVM_SET_DEBUGREGS(), dregs) + unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS(), dregs) } }; if ret < 0 { return errno_result(); @@ -1254,9 +1312,11 @@ impl Vcpu { /// Gets the VCPU extended control registers #[cfg(target_arch = "x86_64")] pub fn get_xcrs(&self) -> Result { + // SAFETY: trivially safe + let mut regs = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. - let mut regs = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) }; if ret != 0 { return errno_result(); @@ -1267,9 +1327,10 @@ impl Vcpu { /// Sets the VCPU extended control registers #[cfg(target_arch = "x86_64")] pub fn set_xcrs(&self, xcrs: &kvm_xcrs) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_xcrs struct. - ioctl_with_ref(self, KVM_SET_XCRS(), xcrs) + unsafe { ioctl_with_ref(self, KVM_SET_XCRS(), xcrs) } }; if ret < 0 { return errno_result(); @@ -1283,21 +1344,27 @@ impl Vcpu { #[cfg(target_arch = "x86_64")] pub fn get_msrs(&self, msr_entries: &mut Vec) -> Result<()> { let mut msrs = vec_with_array_field::(msr_entries.len()); - unsafe { + { + // SAFETY: // Mapping the unsized array to a slice is unsafe because the length isn't known. // Providing the length used to create the struct guarantees the entire slice is valid. 
- let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(msr_entries.len()); - entries.copy_from_slice(msr_entries); + unsafe { + let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(msr_entries.len()); + entries.copy_from_slice(msr_entries); + } } msrs[0].nmsrs = msr_entries.len() as u32; - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read or write past the end of the kvm_msrs struct. - ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) + unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) } }; if ret < 0 { // KVM_SET_MSRS actually returns the number of msr entries written. return errno_result(); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { let count = ret as usize; assert!(count <= msr_entries.len()); @@ -1313,9 +1380,10 @@ impl Vcpu { /// See the documentation for KVM_SET_MSRS. #[cfg(target_arch = "x86_64")] pub fn set_msrs(&self, msrs: &kvm_msrs) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_msrs struct. - ioctl_with_ref(self, KVM_SET_MSRS(), msrs) + unsafe { ioctl_with_ref(self, KVM_SET_MSRS(), msrs) } }; if ret < 0 { // KVM_SET_MSRS actually returns the number of msr entries written. @@ -1329,9 +1397,10 @@ impl Vcpu { /// See the documentation for KVM_SET_CPUID2. #[cfg(target_arch = "x86_64")] pub fn set_cpuid2(&self, cpuid: &CpuId) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // Here we trust the kernel not to read past the end of the kvm_msrs struct. - ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) + unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) } }; if ret < 0 { return errno_result(); @@ -1345,11 +1414,12 @@ impl Vcpu { const MAX_KVM_CPUID_ENTRIES: usize = 256; let mut cpuid = CpuId::new(MAX_KVM_CPUID_ENTRIES); - let ret = unsafe { + let ret = { + // SAFETY: // ioctl is unsafe. 
The kernel is trusted not to write beyond the bounds of the memory // allocated for the struct. The limit is read from nent, which is set to the allocated // size(MAX_KVM_CPUID_ENTRIES) above. - ioctl_with_mut_ptr(self, KVM_GET_SUPPORTED_HV_CPUID(), cpuid.as_mut_ptr()) + unsafe { ioctl_with_mut_ptr(self, KVM_GET_SUPPORTED_HV_CPUID(), cpuid.as_mut_ptr()) } }; if ret < 0 { return errno_result(); @@ -1364,10 +1434,11 @@ impl Vcpu { pub fn get_lapic(&self) -> Result { let mut klapic: kvm_lapic_state = Default::default(); - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is unsafe unless you trust the kernel not to write past the end of the // local_apic struct. - ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) + unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) } }; if ret < 0 { return errno_result(); @@ -1380,9 +1451,10 @@ impl Vcpu { /// See the documentation for KVM_SET_LAPIC. #[cfg(target_arch = "x86_64")] pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the klapic struct. - ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) + unsafe { ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) } }; if ret < 0 { return errno_result(); @@ -1399,9 +1471,11 @@ impl Vcpu { /// to run crosvm on s390. #[cfg(target_arch = "x86_64")] pub fn get_mp_state(&self) -> Result { + // SAFETY: trivially safe + let mut state: kvm_mp_state = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel will only // write correct amount of memory to our pointer, and we verify the return result. - let mut state: kvm_mp_state = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE(), &mut state) }; if ret < 0 { return errno_result(); @@ -1418,9 +1492,10 @@ impl Vcpu { /// to run crosvm on s390. 
#[cfg(target_arch = "x86_64")] pub fn set_mp_state(&self, state: &kvm_mp_state) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the kvm_mp_state struct. - ioctl_with_ref(self, KVM_SET_MP_STATE(), state) + unsafe { ioctl_with_ref(self, KVM_SET_MP_STATE(), state) } }; if ret < 0 { return errno_result(); @@ -1434,10 +1509,12 @@ impl Vcpu { /// #[cfg(target_arch = "x86_64")] pub fn get_vcpu_events(&self) -> Result { + // SAFETY: trivially safe + let mut events: kvm_vcpu_events = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because we know that our file is a VCPU fd, we know the kernel // will only write correct amount of memory to our pointer, and we // verify the return result. - let mut events: kvm_vcpu_events = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut events) }; if ret < 0 { return errno_result(); @@ -1451,10 +1528,11 @@ impl Vcpu { /// #[cfg(target_arch = "x86_64")] pub fn set_vcpu_events(&self, events: &kvm_vcpu_events) -> Result<()> { - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the // kvm_vcpu_events. - ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), events) + unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), events) } }; if ret < 0 { return errno_result(); @@ -1468,6 +1546,7 @@ impl Vcpu { /// This function is marked as unsafe because `cap` may contain values which are interpreted as /// pointers by the kernel. pub unsafe fn kvm_enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> { + // SAFETY: // Safe because we allocated the struct and we know the kernel will read exactly the size of // the struct. let ret = ioctl_with_ref(self, KVM_ENABLE_CAP(), cap); @@ -1492,6 +1571,7 @@ impl Vcpu { // Ensure the length is not too big. 
const _ASSERT: usize = size_of::() - 8usize; + // SAFETY: // Safe as we allocated exactly the needed space unsafe { copy_nonoverlapping( @@ -1501,10 +1581,11 @@ impl Vcpu { ); } - let ret = unsafe { + let ret = { + // SAFETY: // The ioctl is safe because the kernel will only read from the // kvm_signal_mask structure. - ioctl_with_ref(self, KVM_SET_SIGNAL_MASK(), &kvm_sigmask[0]) + unsafe { ioctl_with_ref(self, KVM_SET_SIGNAL_MASK(), &kvm_sigmask[0]) } }; if ret < 0 { return errno_result(); @@ -1521,6 +1602,7 @@ impl Vcpu { id: reg_id, addr: data_ref as u64, }; + // SAFETY: // safe because we allocated the struct and we know the kernel will read // exactly the size of the struct let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG(), &onereg) }; @@ -1556,14 +1638,17 @@ impl RunnableVcpu { // The pointer is page aligned so casting to a different type is well defined, hence the clippy // allow attribute. pub fn run(&self) -> Result { + // SAFETY: // Safe because we know that our file is a VCPU fd and we verify the return result. let ret = unsafe { ioctl(self, KVM_RUN()) }; if ret == 0 { + // SAFETY: // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. let run = unsafe { &*(self.run_mmap.as_ptr() as *const kvm_run) }; match run.exit_reason { KVM_EXIT_IO => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let io = unsafe { run.__bindgen_anon_1.io }; @@ -1574,6 +1659,7 @@ impl RunnableVcpu { KVM_EXIT_IO_OUT => { let mut data = [0; 8]; let run_start = run as *const kvm_run as *const u8; + // SAFETY: // The data_offset is defined by the kernel to be some number of bytes // into the kvm_run structure, which we have fully mmap'd. unsafe { @@ -1590,6 +1676,7 @@ impl RunnableVcpu { } } KVM_EXIT_MMIO => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let mmio = unsafe { &run.__bindgen_anon_1.mmio }; @@ -1606,17 +1693,21 @@ impl RunnableVcpu { } } KVM_EXIT_IOAPIC_EOI => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let vector = unsafe { run.__bindgen_anon_1.eoi.vector }; Ok(VcpuExit::IoapicEoi { vector }) } KVM_EXIT_HYPERV => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let hyperv = unsafe { &run.__bindgen_anon_1.hyperv }; match hyperv.type_ { KVM_EXIT_HYPERV_SYNIC => { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let synic = unsafe { &hyperv.u.synic }; Ok(VcpuExit::HypervSynic { msr: synic.msr, @@ -1626,6 +1717,8 @@ impl RunnableVcpu { }) } KVM_EXIT_HYPERV_HCALL => { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let hcall = unsafe { &hyperv.u.hcall }; Ok(VcpuExit::HypervHcall { input: hcall.input, @@ -1643,6 +1736,7 @@ impl RunnableVcpu { KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen), KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown), KVM_EXIT_FAIL_ENTRY => { + // SAFETY: // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let hardware_entry_failure_reason = unsafe { @@ -1669,9 +1763,14 @@ impl RunnableVcpu { KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch), KVM_EXIT_EPR => Ok(VcpuExit::Epr), KVM_EXIT_SYSTEM_EVENT => { - // Safe because we know the exit reason told us this union - // field is valid - let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ }; + let event_type = { + // SAFETY: + // Safe because we know the exit reason told us this union + // field is valid + unsafe { run.__bindgen_anon_1.system_event.type_ } + }; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let event_flags = unsafe { run.__bindgen_anon_1.system_event.__bindgen_anon_1.flags }; Ok(VcpuExit::SystemEvent(event_type, event_flags)) diff --git a/kvm/tests/dirty_log.rs b/kvm/tests/dirty_log.rs index 1273b0b8f8..75dfa58b98 100644 --- a/kvm/tests/dirty_log.rs +++ b/kvm/tests/dirty_log.rs @@ -39,6 +39,7 @@ fn test_run() { vcpu_sregs.cs.selector = 0; vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed"); + // SAFETY: trivially safe let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() }; vcpu_regs.rip = load_addr.offset(); vcpu_regs.rflags = 2; diff --git a/kvm/tests/kvm_tests.rs b/kvm/tests/kvm_tests.rs index 06833167ff..47ee567886 100644 --- a/kvm/tests/kvm_tests.rs +++ b/kvm/tests/kvm_tests.rs @@ -308,6 +308,7 @@ fn irqfd_resample() { vm.register_irqfd_resample(&evtfd1, &evtfd2, 4).unwrap(); vm.unregister_irqfd(&evtfd1, 4).unwrap(); // Ensures the ioctl is actually reading the resamplefd. 
+ // SAFETY: trivially safe vm.register_irqfd_resample(&evtfd1, unsafe { &Event::from_raw_descriptor(-1) }, 4) .unwrap_err(); } @@ -447,6 +448,8 @@ fn enable_feature() { cap: KVM_CAP_HYPERV_SYNIC, ..Default::default() }; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { vcpu.kvm_enable_cap(&cap) }.unwrap(); } diff --git a/kvm/tests/read_only_memory.rs b/kvm/tests/read_only_memory.rs index 94be4a9f77..790e3658e6 100644 --- a/kvm/tests/read_only_memory.rs +++ b/kvm/tests/read_only_memory.rs @@ -43,6 +43,7 @@ fn test_run() { vcpu_sregs.es.selector = 0; vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed"); + // SAFETY: trivially safe let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() }; vcpu_regs.rip = load_addr.offset(); vcpu_regs.rflags = 2; diff --git a/kvm/tests/real_run_adder.rs b/kvm/tests/real_run_adder.rs index 26a6ae6eb7..18c488e9f4 100644 --- a/kvm/tests/real_run_adder.rs +++ b/kvm/tests/real_run_adder.rs @@ -43,6 +43,7 @@ fn test_run() { vcpu_sregs.cs.selector = 0; vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed"); + // SAFETY: trivially safe let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() }; vcpu_regs.rip = 0x1000; vcpu_regs.rax = 2; diff --git a/kvm_sys/src/aarch64/bindings.rs b/kvm_sys/src/aarch64/bindings.rs index d98198d9e9..d326e80690 100644 --- a/kvm_sys/src/aarch64/bindings.rs +++ b/kvm_sys/src/aarch64/bindings.rs @@ -2,6 +2,7 @@ #![allow(clippy::missing_safety_doc)] #![allow(clippy::upper_case_acronyms)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] diff --git a/kvm_sys/src/x86/bindings.rs b/kvm_sys/src/x86/bindings.rs index 25705e01bd..23b114e599 100644 --- a/kvm_sys/src/x86/bindings.rs +++ b/kvm_sys/src/x86/bindings.rs @@ -1,6 +1,7 @@ /* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] 
#![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/kvm_sys/tests/basic.rs b/kvm_sys/tests/basic.rs index a7148beaf2..1c2ac910f6 100644 --- a/kvm_sys/tests/basic.rs +++ b/kvm_sys/tests/basic.rs @@ -14,27 +14,33 @@ const KVM_PATH: &str = "/dev/kvm\0"; #[test] fn get_version() { + // SAFETY: KVM_PATH is expected to be valid and return value is checked. let sys_fd = unsafe { open64(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); + // SAFETY: sys_fd is expected to be valid and return value is checked. let ret = unsafe { ioctl(sys_fd, KVM_GET_API_VERSION(), 0) }; assert_eq!(ret as u32, KVM_API_VERSION); } #[test] fn create_vm_fd() { + // SAFETY: KVM_PATH is expected to be valid and return value is checked. let sys_fd = unsafe { open64(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); + // SAFETY: sys_fd is expected to be valid and return value is checked. let vm_fd = unsafe { ioctl(sys_fd, KVM_CREATE_VM(), 0) }; assert!(vm_fd >= 0); } #[test] fn check_vm_extension() { + // SAFETY: KVM_PATH is expected to be valid and return value is checked. let sys_fd = unsafe { open64(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); + // SAFETY: sys_fd is expected to be valid and return value is checked. let has_user_memory = unsafe { ioctl(sys_fd, KVM_CHECK_EXTENSION(), KVM_CAP_USER_MEMORY) }; assert_eq!(has_user_memory, 1); } diff --git a/media/ffmpeg/src/avcodec.rs b/media/ffmpeg/src/avcodec.rs index dc61cbbbb4..e24f8145b3 100644 --- a/media/ffmpeg/src/avcodec.rs +++ b/media/ffmpeg/src/avcodec.rs @@ -38,8 +38,9 @@ impl AvError { impl Display for AvError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut buffer = [0u8; 255]; - // Safe because we are passing valid bounds for the buffer. let ret = + // SAFETY: + // Safe because we are passing valid bounds for the buffer. 
unsafe { ffi::av_strerror(self.0, buffer.as_mut_ptr() as *mut c_char, buffer.len()) }; match ret { ret if ret >= 0 => { @@ -79,12 +80,14 @@ pub struct Dimensions { impl AvCodec { /// Returns whether the codec is a decoder. pub fn is_decoder(&self) -> bool { + // SAFETY: // Safe because `av_codec_is_decoder` is called on a valid static `AVCodec` reference. (unsafe { ffi::av_codec_is_decoder(self.0) } != 0) } /// Returns whether the codec is an encoder. pub fn is_encoder(&self) -> bool { + // SAFETY: // Safe because `av_codec_is_encoder` is called on a valid static `AVCodec` reference. (unsafe { ffi::av_codec_is_encoder(self.0) } != 0) } @@ -93,6 +96,7 @@ impl AvCodec { pub fn name(&self) -> &'static str { const INVALID_CODEC_STR: &str = "invalid codec"; + // SAFETY: // Safe because `CStr::from_ptr` is called on a valid zero-terminated C string. unsafe { CStr::from_ptr(self.0.name).to_str() }.unwrap_or(INVALID_CODEC_STR) } @@ -142,6 +146,8 @@ impl AvCodec { /// Internal helper for `build_decoder` to allocate an [`AvCodecContext`]. This needs to be /// paired with a later call to [`AvCodecContext::init`]. fn alloc_context(&self) -> Result { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { ffi::avcodec_alloc_context3(self.0).as_mut() } .ok_or(AvCodecOpenError::ContextAllocation)?; @@ -169,6 +175,7 @@ impl DecoderContextBuilder { get_buffer2: unsafe extern "C" fn(*mut ffi::AVCodecContext, *mut ffi::AVFrame, i32) -> i32, opaque: *mut libc::c_void, ) { + // SAFETY: // Safe because self.context.0 is a pointer to a live AVCodecContext allocation. let context = unsafe { &mut *(self.context.0) }; context.get_buffer2 = Some(get_buffer2); @@ -193,6 +200,8 @@ pub struct EncoderContextBuilder { impl EncoderContextBuilder { /// Set the width of input frames for this encoding context. 
pub fn set_dimensions(&mut self, dimensions: Dimensions) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.context.0) }; context.width = dimensions.width as _; context.height = dimensions.height as _; @@ -200,12 +209,16 @@ impl EncoderContextBuilder { /// Set the time base for this encoding context. pub fn set_time_base(&mut self, time_base: ffi::AVRational) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.context.0) }; context.time_base = time_base; } /// Set the input pixel format for this encoding context. pub fn set_pix_fmt(&mut self, fmt: AvPixelFormat) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.context.0) }; context.pix_fmt = fmt.pix_fmt(); } @@ -237,6 +250,7 @@ impl Iterator for AvCodecIterator { type Item = AvCodec; fn next(&mut self) -> Option { + // SAFETY: // Safe because our pointer was initialized to `NULL` and we only use it with // `av_codec_iterate`, which will update it to a valid value. unsafe { ffi::av_codec_iterate(&mut self.0 as *mut *mut libc::c_void).as_ref() } @@ -257,6 +271,7 @@ impl AvProfile { pub fn name(&self) -> &'static str { const INVALID_PROFILE_STR: &str = "invalid profile"; + // SAFETY: // Safe because `CStr::from_ptr` is called on a valid zero-terminated C string. unsafe { CStr::from_ptr(self.0.name).to_str() }.unwrap_or(INVALID_PROFILE_STR) } @@ -281,6 +296,7 @@ impl Iterator for AvProfileIterator { type Item = AvProfile; fn next(&mut self) -> Option { + // SAFETY: // Safe because the contract of `new` stipulates we have received a valid `AVCodec` // reference, thus the `profiles` pointer must either be NULL or point to a valid array // or `VAProfile`s. 
@@ -290,6 +306,7 @@ impl Iterator for AvProfileIterator { match profile.profile { ffi::FF_PROFILE_UNKNOWN => None, _ => { + // SAFETY: // Safe because we have been initialized to a static, valid profiles array // which is terminated by FF_PROFILE_UNKNOWN. self.0 = unsafe { self.0.offset(1) }; @@ -310,8 +327,10 @@ impl AvPixelFormat { pub fn name(&self) -> &'static str { const INVALID_FORMAT_STR: &str = "invalid pixel format"; + // SAFETY: // Safe because `av_get_pix_fmt_name` returns either NULL or a valid C string. let pix_fmt_name = unsafe { ffi::av_get_pix_fmt_name(self.0) }; + // SAFETY: // Safe because `pix_fmt_name` is a valid pointer to a C string. match unsafe { pix_fmt_name @@ -332,6 +351,7 @@ impl AvPixelFormat { /// Return the fourcc of the pixel format, or a series of zeros if its fourcc is unknown. pub fn fourcc(&self) -> [u8; 4] { + // SAFETY: // Safe because `avcodec_pix_fmt_to_codec_tag` does not take any pointer as input and // handles any value passed as argument. unsafe { ffi::avcodec_pix_fmt_to_codec_tag(self.0) }.to_le_bytes() @@ -392,6 +412,7 @@ impl Iterator for AvPixelFormatIterator { type Item = AvPixelFormat; fn next(&mut self) -> Option { + // SAFETY: // Safe because the contract of `AvCodec::new` and `AvCodec::pixel_format_iter` guarantees // that we have been built from a valid `AVCodec` reference, which `pix_fmts` pointer // must either be NULL or point to a valid array or `VAPixelFormat`s. @@ -402,6 +423,7 @@ impl Iterator for AvPixelFormatIterator { // Array of pixel formats is terminated by AV_PIX_FMT_NONE. ffi::AVPixelFormat_AV_PIX_FMT_NONE => None, _ => { + // SAFETY: // Safe because we have been initialized to a static, valid profiles array // which is terminated by AV_PIX_FMT_NONE. self.0 = unsafe { self.0.offset(1) }; @@ -418,6 +440,7 @@ pub struct AvCodecContext(*mut ffi::AVCodecContext); impl Drop for AvCodecContext { fn drop(&mut self) { + // SAFETY: // Safe because our context member is properly allocated and owned by us. 
// Note: `avcodec_open2` might not have been called in case we're wrapped by a // `DecoderContextBuilder` but avcodec_free_context works on both opened and closed @@ -428,6 +451,7 @@ impl Drop for AvCodecContext { impl AsRef for AvCodecContext { fn as_ref(&self) -> &ffi::AVCodecContext { + // SAFETY: // Safe because our context member is properly initialized and fully owned by us. unsafe { &*self.0 } } @@ -442,6 +466,7 @@ pub enum TryReceiveResult { impl AvCodecContext { /// Internal helper for [`DecoderContextBuilder`] to initialize the context. fn init(&mut self, codec: *const ffi::AVCodec) -> Result<(), AvCodecOpenError> { + // SAFETY: // Safe because `codec` is a valid static AVCodec reference, and `self.0` is a valid // AVCodecContext allocation. if unsafe { ffi::avcodec_open2(self.0, codec, std::ptr::null_mut()) } < 0 { @@ -460,6 +485,7 @@ impl AvCodecContext { /// Error codes are the same as those returned by `avcodec_send_packet` with the exception of /// EAGAIN which is converted into `Ok(false)` as it is not actually an error. pub fn try_send_packet(&mut self, packet: &AvPacket) -> Result { + // SAFETY: // Safe because the context is valid through the life of this object, and `packet`'s // lifetime properties ensures its memory area is readable. match unsafe { ffi::avcodec_send_packet(self.0, &packet.packet) } { @@ -479,6 +505,7 @@ impl AvCodecContext { /// Error codes are the same as those returned by `avcodec_receive_frame` with the exception of /// EAGAIN and EOF which are handled as `TryAgain` and `FlushCompleted` respectively. pub fn try_receive_frame(&mut self, frame: &mut AvFrame) -> Result { + // SAFETY: // Safe because the context is valid through the life of this object, and `avframe` is // guaranteed to contain a properly initialized frame. 
match unsafe { ffi::avcodec_receive_frame(self.0, frame.0) } { @@ -498,6 +525,8 @@ impl AvCodecContext { /// Error codes are the same as those returned by `avcodec_send_frame` with the exception of /// EAGAIN which is converted into `Ok(false)` as it is not actually an error. pub fn try_send_frame(&mut self, frame: &AvFrame) -> Result { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] match unsafe { ffi::avcodec_send_frame(self.0, frame.0 as *const _) } { AVERROR_EAGAIN => Ok(false), ret if ret >= 0 => Ok(true), @@ -518,6 +547,7 @@ impl AvCodecContext { &mut self, packet: &mut AvPacket, ) -> Result { + // SAFETY: // Safe because the context is valid through the life of this object, and `avframe` is // guaranteed to contain a properly initialized frame. match unsafe { ffi::avcodec_receive_packet(self.0, &mut packet.packet) } { @@ -531,6 +561,7 @@ impl AvCodecContext { /// Reset the internal codec state/flush internal buffers. /// Should be called e.g. when seeking or switching to a different stream. pub fn reset(&mut self) { + // SAFETY: // Safe because the context is valid through the life of this object. unsafe { ffi::avcodec_flush_buffers(self.0) } } @@ -540,6 +571,7 @@ impl AvCodecContext { /// /// The flush process is complete when `try_receive_frame` returns `FlushCompleted`, pub fn flush_decoder(&mut self) -> Result<(), AvError> { + // SAFETY: // Safe because the context is valid through the life of this object. AvError::result(unsafe { ffi::avcodec_send_packet(self.0, std::ptr::null()) }) } @@ -549,24 +581,31 @@ impl AvCodecContext { /// /// The flush process is complete when `try_receive_packet` returns `FlushCompleted`, pub fn flush_encoder(&mut self) -> Result<(), AvError> { + // SAFETY: // Safe because the context is valid through the life of this object. AvError::result(unsafe { ffi::avcodec_send_frame(self.0, std::ptr::null()) }) } /// Set the time base for this context. 
pub fn set_time_base(&mut self, time_base: AVRational) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.0) }; context.time_base = time_base; } /// Set the bit rate for this context. pub fn set_bit_rate(&mut self, bit_rate: u64) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.0) }; context.bit_rate = bit_rate as _; } /// Set the max bit rate (rc_max_rate) for this context. pub fn set_max_bit_rate(&mut self, bit_rate: u64) { + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let context = unsafe { &mut *(self.0) }; context.rc_max_rate = bit_rate as _; } @@ -609,11 +648,13 @@ impl AvBuffer { let mut storage = Box::new(source); extern "C" fn avbuffer_free(opaque: *mut c_void, _data: *mut u8) { + // SAFETY: // Safe because `opaque` has been created from `Box::into_raw`. `storage` will be // dropped immediately which will release any resources held by the storage. let _ = unsafe { Box::from_raw(opaque as *mut D) }; } + // SAFETY: // Safe because storage points to valid data throughout the lifetime of AVBuffer and we are // checking the return value against NULL, which signals an error. Some(Self(unsafe { @@ -630,6 +671,7 @@ impl AvBuffer { /// Return a slice to the data contained in this buffer. pub fn as_mut_slice(&mut self) -> &mut [u8] { + // SAFETY: // Safe because the data has been initialized from valid storage in the constructor. unsafe { std::slice::from_raw_parts_mut((*self.0).data, (*self.0).size) } } @@ -647,6 +689,7 @@ impl AvBuffer { impl Drop for AvBuffer { fn drop(&mut self) { + // SAFETY: // Safe because `self.0` is a valid pointer to an AVBufferRef. 
unsafe { ffi::av_buffer_unref(&mut self.0) }; } @@ -660,6 +703,7 @@ pub struct AvPacket<'a> { impl<'a> Drop for AvPacket<'a> { fn drop(&mut self) { + // SAFETY: // Safe because `self.packet` is a valid `AVPacket` instance. unsafe { ffi::av_packet_unref(&mut self.packet); @@ -684,6 +728,7 @@ impl<'a> AvPacket<'a> { pts: AV_NOPTS_VALUE as i64, dts: AV_NOPTS_VALUE as i64, pos: -1, + // SAFETY: // Safe because all the other elements of this struct can be zeroed. ..unsafe { std::mem::zeroed() } }, @@ -705,6 +750,7 @@ impl<'a> AvPacket<'a> { size: input_data.len() as c_int, side_data: std::ptr::null_mut(), pos: -1, + // SAFETY: // Safe because all the other elements of this struct can be zeroed. ..unsafe { std::mem::zeroed() } }, @@ -730,6 +776,7 @@ impl<'a> AvPacket<'a> { size, side_data: std::ptr::null_mut(), pos: -1, + // SAFETY: // Safe because all the other elements of this struct can be zeroed. ..unsafe { std::mem::zeroed() } }, @@ -779,6 +826,7 @@ impl AvFrame { /// decoded into. pub fn new() -> Result { Ok(Self( + // SAFETY: // Safe because `av_frame_alloc` does not take any input. unsafe { ffi::av_frame_alloc().as_mut() }.ok_or(AvFrameError::FrameAllocationFailed)?, )) @@ -805,6 +853,7 @@ impl AvFrame { /// Set the picture type (I-frame, P-frame etc.) on this frame. pub fn set_pict_type(&mut self, ty: AVPictureType) { + // SAFETY: // Safe because self.0 is a valid AVFrame reference. unsafe { (*self.0).pict_type = ty; @@ -813,6 +862,7 @@ impl AvFrame { /// Set the presentation timestamp (PTS) of this frame. pub fn set_pts(&mut self, ts: i64) { + // SAFETY: // Safe because self.0 is a valid AVFrame reference. unsafe { (*self.0).pts = ts; @@ -821,6 +871,7 @@ impl AvFrame { /// Query if this AvFrame is writable, i.e. it is refcounted and the refcounts are 1. pub fn is_writable(&self) -> bool { + // SAFETY: // Safe because self.0 is a valid AVFrame reference. 
unsafe { ffi::av_frame_is_writable(self.0) != 0 } } @@ -830,6 +881,7 @@ impl AvFrame { /// /// [`is_writable`]: AvFrame::is_writable pub fn make_writable(&mut self) -> Result<(), AvFrameError> { + // SAFETY: // Safe because self.0 is a valid AVFrame reference. AvError::result(unsafe { ffi::av_frame_make_writable(self.0) }).map_err(Into::into) } @@ -840,6 +892,7 @@ impl AvFrameBuilder { /// /// The dimensions must not be greater than `i32::MAX`. pub fn set_dimensions(&mut self, dimensions: Dimensions) -> Result<(), AvFrameError> { + // SAFETY: // Safe because self.0 is a valid AVFrame instance and width and height are in range. unsafe { (*self.0 .0).width = dimensions @@ -856,6 +909,7 @@ impl AvFrameBuilder { /// Set the frame's format. pub fn set_format(&mut self, format: AvPixelFormat) -> Result<(), AvFrameError> { + // SAFETY: // Safe because self.0 is a valid AVFrame instance and format is a valid pixel format. unsafe { (*self.0 .0).format = format.pix_fmt(); @@ -919,6 +973,8 @@ impl AvFrameBuilder { if stride < format.line_size(self.0.dimensions().width, plane)? { return Err(AvFrameError::InvalidStride); } + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { (*self.0 .0).data[plane] = buffers[buffer_index].as_mut_slice()[offset..].as_mut_ptr(); @@ -946,6 +1002,7 @@ impl AvFrameBuilder { } for (i, buf) in buffers.into_iter().enumerate() { + // SAFETY: // Safe because self.0 is a valid AVFrame instance and buffers contains valid AvBuffers. unsafe { (*self.0 .0).buf[i] = buf.into_raw(); @@ -957,6 +1014,7 @@ impl AvFrameBuilder { impl AsRef for AvFrame { fn as_ref(&self) -> &ffi::AVFrame { + // SAFETY: // Safe because the AVFrame has been properly initialized during construction. unsafe { &*self.0 } } @@ -966,6 +1024,7 @@ impl Deref for AvFrame { type Target = ffi::AVFrame; fn deref(&self) -> &Self::Target { + // SAFETY: // Safe because the AVFrame has been properly initialized during construction. 
unsafe { self.0.as_ref().unwrap() } } @@ -973,6 +1032,7 @@ impl Deref for AvFrame { impl Drop for AvFrame { fn drop(&mut self) { + // SAFETY: // Safe because the AVFrame is valid through the life of this object and fully owned by us. unsafe { ffi::av_frame_free(&mut self.0) }; } diff --git a/media/ffmpeg/src/avutil.rs b/media/ffmpeg/src/avutil.rs index 88ffc6b279..b16c775f06 100644 --- a/media/ffmpeg/src/avutil.rs +++ b/media/ffmpeg/src/avutil.rs @@ -12,6 +12,7 @@ const MAX_FFMPEG_PLANES: usize = 4; /// Get the maximum data alignment that may be required by FFmpeg. /// This could change depending on FFmpeg's build configuration (AVX etc.). pub fn max_buffer_alignment() -> usize { + // SAFETY: // Safe because this function has no side effects and just returns an integer. unsafe { ffi::av_cpu_max_align() } } @@ -22,6 +23,7 @@ pub(crate) fn av_image_line_size( width: u32, plane: usize, ) -> Result { + // SAFETY: // Safe because format is a valid format and this function is pure computation. match unsafe { ffi::av_image_get_linesize(format.pix_fmt(), width as _, plane as _) } { i if i >= 0 => Ok(i as _), @@ -42,6 +44,7 @@ pub(crate) fn av_image_plane_sizes>( planes += 1; } let mut plane_sizes_buf = [0; MAX_FFMPEG_PLANES]; + // SAFETY: // Safe because plane_sizes_buf and linesizes_buf have the size specified by the API, format is // valid, and this function doesn't have any side effects other than writing to plane_sizes_buf. 
AvError::result(unsafe { diff --git a/media/ffmpeg/src/error.rs b/media/ffmpeg/src/error.rs index 62dcbc5c8d..d9fffd4bdd 100644 --- a/media/ffmpeg/src/error.rs +++ b/media/ffmpeg/src/error.rs @@ -23,6 +23,8 @@ mod test { fn test_averror(averror: c_int, expected_message: &str) { let mut buffer = [0u8; 255]; + // TODO(b:315859322): add safety doc string + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { ffi::av_strerror(averror, buffer.as_mut_ptr() as *mut c_char, buffer.len()) }; assert_eq!(ret, 0); diff --git a/media/ffmpeg/src/lib.rs b/media/ffmpeg/src/lib.rs index 4325e9b807..4d327e9943 100644 --- a/media/ffmpeg/src/lib.rs +++ b/media/ffmpeg/src/lib.rs @@ -11,6 +11,7 @@ mod error; pub use error::*; mod ffi { #![allow(clippy::missing_safety_doc)] + #![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/media/ffmpeg/src/swscale.rs b/media/ffmpeg/src/swscale.rs index 29c938a67e..a0103dc3c3 100644 --- a/media/ffmpeg/src/swscale.rs +++ b/media/ffmpeg/src/swscale.rs @@ -37,6 +37,7 @@ pub enum ConversionError { impl Drop for SwConverter { fn drop(&mut self) { + // SAFETY: // Safe because `sws_context` is valid through the life of this object. unsafe { ffi::sws_freeContext(self.sws_context) }; } @@ -53,6 +54,7 @@ impl SwConverter { src_pix_format: ffi::AVPixelFormat, dst_pix_format: ffi::AVPixelFormat, ) -> anyhow::Result { + // SAFETY: // Safe because we don't pass any non-null pointer to this function. let sws_context = unsafe { ffi::sws_getContext( @@ -112,6 +114,7 @@ impl SwConverter { return Err(ConversionError::NotWritable); } + // SAFETY: // Safe because `sws_context`, `src_ref.data` and `dst_data` are all valid pointers, and // we made sure the sizes provided are within the bounds of the buffers. 
AvError::result(unsafe { diff --git a/metrics/src/sys/windows/system_metrics.rs b/metrics/src/sys/windows/system_metrics.rs index 20f90ef2d1..86b9d781be 100644 --- a/metrics/src/sys/windows/system_metrics.rs +++ b/metrics/src/sys/windows/system_metrics.rs @@ -249,6 +249,7 @@ impl Worker { let mut counters = PROCESS_MEMORY_COUNTERS_EX::default(); + // SAFETY: // Safe because we own the process handle and all memory was allocated. let result = unsafe { GetProcessMemoryInfo( @@ -280,6 +281,7 @@ impl Worker { let mut process_time: ProcessCpuTime = Default::default(); let sys_time_success: i32; + // SAFETY: // Safe because memory is allocated for sys_time before the windows call. // And the value were initilized to 0s. unsafe { @@ -294,6 +296,7 @@ impl Worker { // Query current process cpu time. let process_handle = CoreWinMetrics::get_process_handle()?; let process_time_success: i32; + // SAFETY: // Safe because memory is allocated for process_time before the windows call. // And the value were initilized to 0s. unsafe { @@ -328,6 +331,7 @@ impl Worker { fn get_io_metrics(&self) -> SysResult { let process_handle = CoreWinMetrics::get_process_handle()?; let mut io_counters = IO_COUNTERS::default(); + // SAFETY: // Safe because we own the process handle and all memory was allocated. let result = unsafe { GetProcessIoCounters( @@ -454,6 +458,7 @@ impl Worker { } fn compute_filetime_subtraction(fta: FILETIME, ftb: FILETIME) -> LONGLONG { + // SAFETY: // safe because we are initializing the struct to 0s. unsafe { let mut a: LARGE_INTEGER = mem::zeroed::(); @@ -584,6 +589,7 @@ impl CoreWinMetrics { } fn get_process_handle() -> SysResult { + // SAFETY: // Safe because we own the current process. let process_handle = unsafe { OpenProcess( @@ -595,6 +601,7 @@ impl CoreWinMetrics { if process_handle.is_null() { return Err(SysError::last()); } + // SAFETY: // Safe as the SafeDescriptor is the only thing with access to the handle after this. 
Ok(unsafe { SafeDescriptor::from_raw_descriptor(process_handle) }) } diff --git a/net_sys/src/if_tun.rs b/net_sys/src/if_tun.rs index 6ad329468a..9ae9d5be02 100644 --- a/net_sys/src/if_tun.rs +++ b/net_sys/src/if_tun.rs @@ -1,6 +1,7 @@ /* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] @@ -38,6 +39,7 @@ pub struct sock_fprog { impl Default for sock_fprog { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/net_sys/src/iff.rs b/net_sys/src/iff.rs index cb9f5a6aa4..9fa25c50c3 100644 --- a/net_sys/src/iff.rs +++ b/net_sys/src/iff.rs @@ -1,6 +1,7 @@ /* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/net_util/src/slirp/sys/windows.rs b/net_util/src/slirp/sys/windows.rs index 8bad4550b5..2c9457a2d9 100644 --- a/net_util/src/slirp/sys/windows.rs +++ b/net_util/src/slirp/sys/windows.rs @@ -238,6 +238,7 @@ impl TapTCommon for Slirp { impl Read for Slirp { fn read(&mut self, buf: &mut [u8]) -> IoResult { + // SAFETY: // Safe because we are reading simple bytes. unsafe { self.guest_pipe diff --git a/net_util/src/slirp/sys/windows/handler.rs b/net_util/src/slirp/sys/windows/handler.rs index c7dc2cc2ba..66099bebb0 100644 --- a/net_util/src/slirp/sys/windows/handler.rs +++ b/net_util/src/slirp/sys/windows/handler.rs @@ -205,6 +205,7 @@ impl CallbackHandler for Handler { } fn begin_read_from_guest(&mut self) -> io::Result<()> { + // SAFETY: // Safe because we are writing simple bytes. 
unsafe { self.pipe @@ -288,10 +289,14 @@ impl Drop for Handler { } fn last_wsa_error() -> io::Error { - io::Error::from_raw_os_error(unsafe { WSAGetLastError() }) + io::Error::from_raw_os_error( + // SAFETY: trivially safe + unsafe { WSAGetLastError() }, + ) } fn poll_sockets(mut sockets: Vec) -> io::Result> { + // SAFETY: // Safe because sockets is guaranteed to be valid, and we handle error return codes below. let poll_result = unsafe { WSAPoll( @@ -373,6 +378,7 @@ struct EventSelectedSocket<'a> { impl<'a> EventSelectedSocket<'a> { fn new(socket: WSAPOLLFD, event: &'a Event) -> Result { + // SAFETY: // Safe because socket.fd exists, the event handle is guaranteed to exist, and we check the // return code below. let res = unsafe { @@ -393,6 +399,7 @@ impl<'a> EventSelectedSocket<'a> { impl<'a> Drop for EventSelectedSocket<'a> { fn drop(&mut self) { + // SAFETY: // Safe because socket.fd exists, the event handle is guaranteed to exist, and we check the // return code below. let res = unsafe { @@ -490,9 +497,11 @@ struct WSAContext { impl WSAContext { fn new() -> Result { + // SAFETY: // Trivially safe (initialization of this memory is not required). let mut ctx: WSAContext = unsafe { std::mem::zeroed() }; + // SAFETY: // Safe because ctx.data is guaranteed to exist, and we check the return code. let err = unsafe { WSAStartup(MAKEWORD(2, 0), &mut ctx.data) }; if err != 0 { @@ -507,6 +516,7 @@ impl WSAContext { impl Drop for WSAContext { fn drop(&mut self) { + // SAFETY: trivially safe with return value checked. let err = unsafe { WSACleanup() }; if err != 0 { error!("WSACleanup failed: {}", last_wsa_error()) @@ -982,6 +992,8 @@ mod tests { .unwrap(); let mut recv_buffer: [u8; 512] = [0; 512]; + // SAFETY: safe because the buffer & overlapped wrapper are in scope for + // the duration of the overlapped operation. 
unsafe { guest_pipe.read_overlapped(&mut recv_buffer, &mut overlapped_wrapper) }.unwrap(); let size = guest_pipe .get_overlapped_result(&mut overlapped_wrapper) diff --git a/net_util/src/sys/linux/tap.rs b/net_util/src/sys/linux/tap.rs index 1b0847d8d2..e7d157c662 100644 --- a/net_util/src/sys/linux/tap.rs +++ b/net_util/src/sys/linux/tap.rs @@ -76,6 +76,7 @@ impl Tap { } pub fn create_tap_with_ifreq(ifreq: &mut net_sys::ifreq) -> Result { + // SAFETY: // Open calls are safe because we give a constant nul-terminated // string and verify the result. let rd = unsafe { @@ -88,8 +89,10 @@ impl Tap { return Err(Error::OpenTun(SysError::last())); } + // SAFETY: // We just checked that the fd is valid. let tuntap = unsafe { File::from_raw_descriptor(rd) }; + // SAFETY: // ioctl is safe since we call it with a valid tap fd and check the return // value. let ret = unsafe { ioctl_with_mut_ref(&tuntap, net_sys::TUNSETIFF(), ifreq) }; @@ -98,10 +101,13 @@ impl Tap { return Err(Error::CreateTap(SysError::last())); } - // Safe since only the name is accessed, and it's copied out. Ok(Tap { tap_file: tuntap, + // SAFETY: + // Safe since only the name is accessed, and it's copied out. if_name: unsafe { ifreq.ifr_ifrn.ifrn_name }, + // SAFETY: + // Safe since only the name is accessed, and it's copied out. if_flags: unsafe { ifreq.ifr_ifru.ifru_flags }, }) } @@ -109,6 +115,7 @@ impl Tap { fn get_ifreq(&self) -> net_sys::ifreq { let mut ifreq: net_sys::ifreq = Default::default(); + // SAFETY: // This sets the name of the interface, which is the only entry // in a single-field union. unsafe { @@ -149,10 +156,11 @@ impl TapTCommon for Tap { } fn new_with_name(name: &[u8], vnet_hdr: bool, multi_vq: bool) -> Result { + let mut ifreq: net_sys::ifreq = Default::default(); + // SAFETY: // This is pretty messy because of the unions used by ifreq. Since we // don't call as_mut on the same union field more than once, this block // is safe. 
- let mut ifreq: net_sys::ifreq = Default::default(); unsafe { let ifrn_name = ifreq.ifr_ifrn.ifrn_name.as_mut(); for (dst, src) in ifrn_name @@ -199,6 +207,7 @@ impl TapTCommon for Tap { let sock = create_socket()?; let mut ifreq = self.get_ifreq(); + // SAFETY: // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = unsafe { ioctl_with_mut_ref(&sock, net_sys::sockios::SIOCGIFADDR as IoctlNr, &mut ifreq) @@ -208,6 +217,7 @@ impl TapTCommon for Tap { return Err(Error::IoctlError(SysError::last())); } + // SAFETY: // We only access one field of the ifru union, hence this is safe. let addr = unsafe { ifreq.ifr_ifru.ifru_addr }; @@ -221,8 +231,9 @@ impl TapTCommon for Tap { let mut ifreq = self.get_ifreq(); ifreq.ifr_ifru.ifru_addr = addr; - // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = + // SAFETY: + // ioctl is safe. Called with a valid sock descriptor, and we check the return. unsafe { ioctl_with_ref(&sock, net_sys::sockios::SIOCSIFADDR as IoctlNr, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(SysError::last())); @@ -235,6 +246,7 @@ impl TapTCommon for Tap { let sock = create_socket()?; let mut ifreq = self.get_ifreq(); + // SAFETY: // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = unsafe { ioctl_with_mut_ref( @@ -247,6 +259,7 @@ impl TapTCommon for Tap { return Err(Error::IoctlError(SysError::last())); } + // SAFETY: // We only access one field of the ifru union, hence this is safe. let addr = unsafe { ifreq.ifr_ifru.ifru_netmask }; @@ -260,8 +273,9 @@ impl TapTCommon for Tap { let mut ifreq = self.get_ifreq(); ifreq.ifr_ifru.ifru_netmask = addr; - // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = + // SAFETY: + // ioctl is safe. Called with a valid sock descriptor, and we check the return. 
unsafe { ioctl_with_ref(&sock, net_sys::sockios::SIOCSIFNETMASK as IoctlNr, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(SysError::last())); @@ -274,6 +288,7 @@ impl TapTCommon for Tap { let sock = create_socket()?; let mut ifreq = self.get_ifreq(); + // SAFETY: // ioctl is safe. Called with a valid sock fd, and we check the return. let ret = unsafe { ioctl_with_mut_ref(&sock, net_sys::sockios::SIOCGIFMTU as IoctlNr, &mut ifreq) @@ -282,6 +297,7 @@ impl TapTCommon for Tap { return Err(Error::IoctlError(SysError::last())); } + // SAFETY: // We only access one field of the ifru union, hence this is safe. let mtu = unsafe { ifreq.ifr_ifru.ifru_mtu } as u16; Ok(mtu) @@ -293,6 +309,7 @@ impl TapTCommon for Tap { let mut ifreq = self.get_ifreq(); ifreq.ifr_ifru.ifru_mtu = i32::from(mtu); + // SAFETY: // ioctl is safe. Called with a valid sock fd, and we check the return. let ret = unsafe { ioctl_with_ref(&sock, net_sys::sockios::SIOCSIFMTU as IoctlNr, &ifreq) }; if ret < 0 { @@ -306,6 +323,7 @@ impl TapTCommon for Tap { let sock = create_socket()?; let mut ifreq = self.get_ifreq(); + // SAFETY: // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = unsafe { ioctl_with_mut_ref( @@ -318,6 +336,7 @@ impl TapTCommon for Tap { return Err(Error::IoctlError(SysError::last())); } + // SAFETY: // We only access one field of the ifru union, hence this is safe. let sa: libc::sockaddr = unsafe { ifreq.ifr_ifru.ifru_hwaddr }; @@ -355,8 +374,9 @@ impl TapTCommon for Tap { let mut ifreq = self.get_ifreq(); ifreq.ifr_ifru.ifru_hwaddr = sa; - // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = + // SAFETY: + // ioctl is safe. Called with a valid sock descriptor, and we check the return. 
unsafe { ioctl_with_ref(&sock, net_sys::sockios::SIOCSIFHWADDR as IoctlNr, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(SysError::last())); @@ -366,8 +386,9 @@ impl TapTCommon for Tap { } fn set_offload(&self, flags: c_uint) -> Result<()> { - // ioctl is safe. Called with a valid tap descriptor, and we check the return. let ret = + // SAFETY: + // ioctl is safe. Called with a valid tap descriptor, and we check the return. unsafe { ioctl_with_val(&self.tap_file, net_sys::TUNSETOFFLOAD(), flags as c_ulong) }; if ret < 0 { return Err(Error::IoctlError(SysError::last())); @@ -383,8 +404,9 @@ impl TapTCommon for Tap { ifreq.ifr_ifru.ifru_flags = (net_sys::net_device_flags::IFF_UP | net_sys::net_device_flags::IFF_RUNNING).0 as i16; - // ioctl is safe. Called with a valid sock descriptor, and we check the return. let ret = + // SAFETY: + // ioctl is safe. Called with a valid sock descriptor, and we check the return. unsafe { ioctl_with_ref(&sock, net_sys::sockios::SIOCSIFFLAGS as IoctlNr, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(SysError::last())); @@ -397,6 +419,7 @@ impl TapTCommon for Tap { self.try_clone() } + // SAFETY: // Safe if caller provides a valid descriptor. unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Result { Tap::from_raw_descriptor(descriptor) @@ -406,6 +429,7 @@ impl TapTCommon for Tap { impl TapTLinux for Tap { fn set_vnet_hdr_size(&self, size: usize) -> Result<()> { let size = size as c_int; + // SAFETY: // ioctl is safe. Called with a valid tap descriptor, and we check the return. let ret = unsafe { ioctl_with_ref(&self.tap_file, net_sys::TUNSETVNETHDRSZ(), &size) }; if ret < 0 { @@ -455,18 +479,22 @@ impl ReadNotifier for Tap { } fn create_socket() -> Result { + // SAFETY: // This is safe since we check the return value. let sock = unsafe { libc::socket(libc::AF_INET, libc::SOCK_DGRAM, 0) }; if sock >= 0 { + // SAFETY: // This is safe; nothing else will use or hold onto the raw sock descriptor. 
return Ok(unsafe { net::UdpSocket::from_raw_fd(sock) }); } warn!("INET not supported on this machine. Trying to open an INET6 socket."); + // SAFETY: // Open an AF_INET6 socket let sock6 = unsafe { libc::socket(libc::AF_INET6, libc::SOCK_DGRAM, 0) }; if sock6 >= 0 { + // SAFETY: // This is safe; nothing else will use or hold onto the raw sock descriptor. return Ok(unsafe { net::UdpSocket::from_raw_fd(sock6) }); } @@ -484,16 +512,19 @@ fn create_sockaddr(ip_addr: net::Ipv4Addr) -> libc::sockaddr { let addr_in = libc::sockaddr_in { sin_family: libc::AF_INET as u16, sin_port: 0, + // SAFETY: trivially safe sin_addr: unsafe { mem::transmute(ip_addr.octets()) }, sin_zero: [0; 8usize], }; + // SAFETY: trivially safe unsafe { mem::transmute(addr_in) } } /// Extract the IPv4 address from a sockaddr. Assumes the sockaddr is a sockaddr_in. fn read_ipv4_addr(addr: &libc::sockaddr) -> net::Ipv4Addr { debug_assert_eq!(addr.sa_family as i32, libc::AF_INET); + // SAFETY: // This is safe because sockaddr and sockaddr_in are the same size, and we've checked that // this address is AF_INET. let in_addr: libc::sockaddr_in = unsafe { mem::transmute(*addr) }; diff --git a/protos/src/plugin.rs b/protos/src/plugin.rs index c12e60e2af..2faff03c8a 100644 --- a/protos/src/plugin.rs +++ b/protos/src/plugin.rs @@ -9,7 +9,7 @@ pub use crate::generated::plugin::*; /// Converts protobuf representation of CpuId data into KVM format. #[cfg(target_arch = "x86_64")] pub fn cpuid_proto_to_kvm(entry: &CpuidEntry) -> kvm_sys::kvm_cpuid_entry2 { - // Safe: C structures are expected to be zero-initialized. + // SAFETY: C structures are expected to be zero-initialized. 
let mut e: kvm_sys::kvm_cpuid_entry2 = unsafe { std::mem::zeroed() }; e.function = entry.function; if entry.has_index { diff --git a/rutabaga_gfx/src/cross_domain/mod.rs b/rutabaga_gfx/src/cross_domain/mod.rs index 6eb7ec9883..260c1b6b75 100644 --- a/rutabaga_gfx/src/cross_domain/mod.rs +++ b/rutabaga_gfx/src/cross_domain/mod.rs @@ -230,9 +230,9 @@ impl CrossDomainState { .backing_iovecs .as_mut() .ok_or(RutabagaError::InvalidIovec)?; - - // Safe because we've verified the iovecs are attached and owned only by this context. let slice = + // SAFETY: + // Safe because we've verified the iovecs are attached and owned only by this context. unsafe { std::slice::from_raw_parts_mut(iovecs[0].base as *mut u8, iovecs[0].len) }; match ring_write { diff --git a/rutabaga_gfx/src/cross_domain/sys/epoll_internal.rs b/rutabaga_gfx/src/cross_domain/sys/epoll_internal.rs index 5c2c2206c5..8a93a118f9 100644 --- a/rutabaga_gfx/src/cross_domain/sys/epoll_internal.rs +++ b/rutabaga_gfx/src/cross_domain/sys/epoll_internal.rs @@ -32,6 +32,7 @@ impl EpollEvent { } pub fn empty() -> Self { + // SAFETY: trivially safe unsafe { mem::zeroed::() } } @@ -52,8 +53,12 @@ impl Epoll { /// /// [`epoll_create1`](https://man7.org/linux/man-pages/man2/epoll_create1.2.html). 
pub fn new(flags: EpollCreateFlags) -> Result { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let res = unsafe { libc::epoll_create1(flags.bits()) }; let fd = Errno::result(res)?; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let owned_fd = unsafe { OwnedFd::from_raw_fd(fd) }; Ok(Self(owned_fd)) } @@ -83,6 +88,8 @@ impl Epoll { /// /// [`epoll_wait`](https://man7.org/linux/man-pages/man2/epoll_wait.2.html) pub fn wait(&self, events: &mut [EpollEvent], timeout: isize) -> Result { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let res = unsafe { libc::epoll_wait( self.0.as_raw_fd(), @@ -109,6 +116,8 @@ impl Epoll { let ptr = event .map(|x| &mut x.event as *mut libc::epoll_event) .unwrap_or(std::ptr::null_mut()); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { Errno::result(libc::epoll_ctl( self.0.as_raw_fd(), diff --git a/rutabaga_gfx/src/cross_domain/sys/linux.rs b/rutabaga_gfx/src/cross_domain/sys/linux.rs index 58a4f6e53d..8f8768e988 100644 --- a/rutabaga_gfx/src/cross_domain/sys/linux.rs +++ b/rutabaga_gfx/src/cross_domain/sys/linux.rs @@ -120,6 +120,7 @@ impl CrossDomainState { Some(ControlMessageOwned::ScmRights(fds)) => { fds.into_iter() .map(|fd| { + // SAFETY: // Safe since the descriptors from recv_with_fds(..) are owned by us and valid. unsafe { File::from_raw_descriptor(fd) } }) @@ -210,7 +211,9 @@ impl CrossDomainContext { } let (raw_read_pipe, raw_write_pipe) = pipe()?; + // SAFETY: Safe because we have created the pipe above and is valid. let read_pipe = unsafe { File::from_raw_descriptor(raw_read_pipe) }; + // SAFETY: Safe because we have created the pipe above and is valid. 
let write_pipe = unsafe { File::from_raw_descriptor(raw_write_pipe) }; *descriptor = write_pipe.as_raw_descriptor(); diff --git a/rutabaga_gfx/src/generated/virgl_renderer_bindings.rs b/rutabaga_gfx/src/generated/virgl_renderer_bindings.rs index 9beedf005e..f09f3b942e 100644 --- a/rutabaga_gfx/src/generated/virgl_renderer_bindings.rs +++ b/rutabaga_gfx/src/generated/virgl_renderer_bindings.rs @@ -359,6 +359,8 @@ pub struct virgl_renderer_resource_info_ext { impl Default for virgl_renderer_resource_info_ext { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -415,6 +417,8 @@ pub struct virgl_renderer_resource_create_blob_args { impl Default for virgl_renderer_resource_create_blob_args { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/rutabaga_gfx/src/gfxstream.rs b/rutabaga_gfx/src/gfxstream.rs index a2e20ba4a2..545eecc811 100644 --- a/rutabaga_gfx/src/gfxstream.rs +++ b/rutabaga_gfx/src/gfxstream.rs @@ -205,6 +205,8 @@ impl RutabagaContext for GfxstreamContext { return Err(RutabagaError::InvalidCommandSize(commands.len())); } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { let cmd = stream_renderer_command { ctx_id: self.ctx_id, @@ -220,6 +222,7 @@ impl RutabagaContext for GfxstreamContext { } fn attach(&mut self, resource: &mut RutabagaResource) { + // SAFETY: // The context id and resource id must be valid because the respective instances ensure // their lifetime. 
unsafe { @@ -228,6 +231,7 @@ impl RutabagaContext for GfxstreamContext { } fn detach(&mut self, resource: &RutabagaResource) { + // SAFETY: // The context id and resource id must be valid because the respective instances ensure // their lifetime. unsafe { @@ -245,6 +249,7 @@ impl RutabagaContext for GfxstreamContext { return Ok(()); } + // SAFETY: // Safe because RutabagaFences and stream_renderer_fence are ABI identical let ret = unsafe { stream_renderer_create_fence(&fence as *const stream_renderer_fence) }; @@ -254,6 +259,7 @@ impl RutabagaContext for GfxstreamContext { impl Drop for GfxstreamContext { fn drop(&mut self) { + // SAFETY: // The context is safe to destroy because nothing else can be referencing it. unsafe { stream_renderer_context_destroy(self.ctx_id); @@ -264,8 +270,11 @@ impl Drop for GfxstreamContext { extern "C" fn write_context_fence(cookie: *mut c_void, fence: *const RutabagaFence) { catch_unwind(|| { assert!(!cookie.is_null()); + // SAFETY: + // We trust gfxstream not give a dangling pointer let cookie = unsafe { &*(cookie as *mut RutabagaCookie) }; if let Some(handler) = &cookie.fence_handler { + // SAFETY: // We trust gfxstream not give a dangling pointer unsafe { handler.call(*fence) }; } @@ -276,8 +285,11 @@ extern "C" fn write_context_fence(cookie: *mut c_void, fence: *const RutabagaFen extern "C" fn gfxstream_debug_callback(cookie: *mut c_void, debug: *const stream_renderer_debug) { catch_unwind(|| { assert!(!cookie.is_null()); + // SAFETY: + // We trust gfxstream not give a dangling pointer let cookie = unsafe { &*(cookie as *mut RutabagaCookie) }; if let Some(handler) = &cookie.debug_handler { + // SAFETY: // We trust gfxstream not give a dangling pointer unsafe { handler.call(*debug) }; } @@ -336,6 +348,8 @@ impl Gfxstream { }, ]; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { ret_to_res(stream_renderer_init( stream_renderer_params.as_mut_ptr(), @@ -348,6 +362,7 @@ impl Gfxstream { 
fn map_info(&self, resource_id: u32) -> RutabagaResult { let mut map_info = 0; + // SAFETY: // Safe because `map_info` is a local stack variable owned by us. let ret = unsafe { stream_renderer_resource_map_info(resource_id, &mut map_info) }; ret_to_res(ret)?; @@ -357,6 +372,7 @@ impl Gfxstream { fn vulkan_info(&self, resource_id: u32) -> RutabagaResult { let mut vulkan_info: stream_renderer_vulkan_info = Default::default(); + // SAFETY: // Safe because `vulkan_info` is a local stack variable owned by us. let ret = unsafe { stream_renderer_vulkan_info(resource_id, &mut vulkan_info) }; ret_to_res(ret)?; @@ -372,12 +388,15 @@ impl Gfxstream { fn export_blob(&self, resource_id: u32) -> RutabagaResult> { let mut stream_handle: stream_renderer_handle = Default::default(); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { stream_renderer_export_blob(resource_id, &mut stream_handle) }; ret_to_res(ret)?; + let raw_descriptor = stream_handle.os_handle as RawDescriptor; + // SAFETY: // Safe because the handle was just returned by a successful gfxstream call so it must be // valid and owned by us. - let raw_descriptor = stream_handle.os_handle as RawDescriptor; let handle = unsafe { SafeDescriptor::from_raw_descriptor(raw_descriptor) }; Ok(Arc::new(RutabagaHandle { @@ -389,7 +408,7 @@ impl Gfxstream { impl Drop for Gfxstream { fn drop(&mut self) { - // SAFETY: Safe because Gfxstream was succesfully initialized. + // SAFETY: Safe because Gfxstream was successfully initialized. unsafe { stream_renderer_teardown(); } @@ -400,6 +419,7 @@ impl RutabagaComponent for Gfxstream { fn get_capset_info(&self, capset_id: u32) -> (u32, u32) { let mut version = 0; let mut size = 0; + // SAFETY: // Safe because gfxstream is initialized by now and properly size stack variables are // used for the pointers. 
unsafe { @@ -411,6 +431,7 @@ impl RutabagaComponent for Gfxstream { fn get_capset(&self, capset_id: u32, version: u32) -> Vec { let (_, max_size) = self.get_capset_info(capset_id); let mut buf = vec![0u8; max_size as usize]; + // SAFETY: // Safe because gfxstream is initialized by now and the given buffer is sized properly // for the given cap id/version. unsafe { @@ -421,6 +442,7 @@ impl RutabagaComponent for Gfxstream { } fn create_fence(&mut self, fence: RutabagaFence) -> RutabagaResult<()> { + // SAFETY: // Safe because RutabagaFences and stream_renderer_fence are ABI identical let ret = unsafe { stream_renderer_create_fence(&fence as *const stream_renderer_fence) }; ret_to_res(ret) @@ -445,6 +467,7 @@ impl RutabagaComponent for Gfxstream { flags: resource_create_3d.flags, }; + // SAFETY: // Safe because gfxstream is initialized by now, and the return value is checked before // returning a new resource. The backing buffers are not supplied with this call. let ret = unsafe { stream_renderer_resource_create(&mut args, null_mut(), 0) }; @@ -472,6 +495,8 @@ impl RutabagaComponent for Gfxstream { resource_id: u32, vecs: &mut Vec, ) -> RutabagaResult<()> { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { stream_renderer_resource_attach_iov( resource_id as i32, @@ -483,6 +508,8 @@ impl RutabagaComponent for Gfxstream { } fn detach_backing(&self, resource_id: u32) { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { stream_renderer_resource_detach_iov( resource_id as i32, @@ -493,6 +520,7 @@ impl RutabagaComponent for Gfxstream { } fn unref_resource(&self, resource_id: u32) { + // SAFETY: // The resource is safe to unreference destroy because no user of these bindings can still // be holding a reference. 
unsafe { @@ -519,6 +547,7 @@ impl RutabagaComponent for Gfxstream { d: transfer.d, }; + // SAFETY: // Safe because only stack variables of the appropriate type are used. let ret = unsafe { stream_renderer_transfer_write_iov( @@ -570,6 +599,7 @@ impl RutabagaComponent for Gfxstream { None => (null_mut(), 0), }; + // SAFETY: // Safe because only stack variables of the appropriate type are used. let ret = unsafe { stream_renderer_transfer_read_iov( @@ -588,6 +618,8 @@ impl RutabagaComponent for Gfxstream { } fn resource_flush(&self, resource: &mut RutabagaResource) -> RutabagaResult<()> { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { stream_renderer_flush(resource.resource_id); } @@ -617,6 +649,8 @@ impl RutabagaComponent for Gfxstream { handle_ptr = &stream_handle; } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { stream_renderer_create_blob( ctx_id, @@ -651,6 +685,7 @@ impl RutabagaComponent for Gfxstream { let mut map: *mut c_void = null_mut(); let mut size: u64 = 0; + // SAFETY: // Safe because the Stream renderer wraps and validates use of vkMapMemory. let ret = unsafe { stream_renderer_resource_map(resource_id, &mut map, &mut size) }; if ret != 0 { @@ -663,6 +698,8 @@ impl RutabagaComponent for Gfxstream { } fn unmap(&self, resource_id: u32) -> RutabagaResult<()> { + // SAFETY: + // Safe because the Stream renderer wraps and validates use of vkMapMemory. let ret = unsafe { stream_renderer_resource_unmap(resource_id) }; ret_to_res(ret) } @@ -679,6 +716,7 @@ impl RutabagaComponent for Gfxstream { name = name_string; } + // SAFETY: // Safe because gfxstream is initialized by now and the context name is statically // allocated. The return value is checked before returning a new context. 
let ret = unsafe { diff --git a/rutabaga_gfx/src/rutabaga_2d.rs b/rutabaga_gfx/src/rutabaga_2d.rs index af105b5e6f..8ba9c3ef8a 100644 --- a/rutabaga_gfx/src/rutabaga_2d.rs +++ b/rutabaga_gfx/src/rutabaga_2d.rs @@ -212,6 +212,7 @@ impl RutabagaComponent for Rutabaga2D { let resource_bpp = 4; let mut src_slices = Vec::with_capacity(iovecs.len()); for iovec in &iovecs { + // SAFETY: // Safe because Rutabaga users should have already checked the iovecs. let slice = unsafe { std::slice::from_raw_parts(iovec.base as *mut u8, iovec.len) }; src_slices.push(slice); diff --git a/rutabaga_gfx/src/rutabaga_gralloc/minigbm.rs b/rutabaga_gfx/src/rutabaga_gralloc/minigbm.rs index 346b6bfa42..812f32a27c 100644 --- a/rutabaga_gfx/src/rutabaga_gralloc/minigbm.rs +++ b/rutabaga_gfx/src/rutabaga_gralloc/minigbm.rs @@ -31,12 +31,16 @@ struct MinigbmDeviceInner { gbm: *mut gbm_device, } +// SAFETY: // Safe because minigbm handles synchronization internally. unsafe impl Send for MinigbmDeviceInner {} +// SAFETY: +// Safe because minigbm handles synchronization internally. unsafe impl Sync for MinigbmDeviceInner {} impl Drop for MinigbmDeviceInner { fn drop(&mut self) { + // SAFETY: // Safe because MinigbmDeviceInner is only constructed with a valid minigbm_device. unsafe { gbm_device_destroy(self.gbm); @@ -59,6 +63,7 @@ impl MinigbmDevice { let undesired: &[&str] = &["vgem", "pvr"]; let fd = rendernode::open_device(undesired)?; + // SAFETY: // gbm_create_device is safe to call with a valid fd, and we check that a valid one is // returned. If the fd does not refer to a DRM device, gbm_create_device will reject it. let gbm = unsafe { gbm_create_device(fd.as_raw_descriptor()) }; @@ -66,9 +71,13 @@ impl MinigbmDevice { return Err(RutabagaError::IoError(Error::last_os_error())); } + // SAFETY: // Safe because a valid minigbm device has a statically allocated string associated with // it, which is valid for the lifetime of the process. 
let backend_name: *const c_char = unsafe { gbm_device_get_backend_name(gbm) }; + // SAFETY: + // Safe because a valid minigbm device has a statically allocated string associated with + // it, which is valid for the lifetime of the process. let c_str: &CStr = unsafe { CStr::from_ptr(backend_name) }; let device_name: &str = c_str.to_str()?; @@ -93,6 +102,8 @@ impl Gralloc for MinigbmDevice { &mut self, info: ImageAllocationInfo, ) -> RutabagaResult { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let bo = unsafe { gbm_bo_create( self.minigbm_device.gbm, @@ -158,6 +169,8 @@ impl Gralloc for MinigbmDevice { }); } + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let bo = unsafe { gbm_bo_create( self.minigbm_device.gbm, @@ -184,58 +197,70 @@ impl Gralloc for MinigbmDevice { /// An allocation from a `MinigbmDevice`. pub struct MinigbmBuffer(*mut gbm_bo, MinigbmDevice); +// SAFETY: // Safe because minigbm handles synchronization internally. unsafe impl Send for MinigbmBuffer {} +// SAFETY: +// Safe because minigbm handles synchronization internally. unsafe impl Sync for MinigbmBuffer {} impl MinigbmBuffer { /// Width in pixels. pub fn width(&self) -> u32 { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_width(self.0) } } /// Height in pixels. pub fn height(&self) -> u32 { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_height(self.0) } } /// `DrmFormat` of the buffer. pub fn format(&self) -> DrmFormat { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { DrmFormat(gbm_bo_get_format(self.0)) } } /// DrmFormat modifier flags for the buffer. pub fn format_modifier(&self) -> u64 { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_modifier(self.0) } } /// Number of planes present in this buffer. 
pub fn num_planes(&self) -> usize { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_plane_count(self.0) as usize } } /// Offset in bytes for the given plane. pub fn plane_offset(&self, plane: usize) -> u32 { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_offset(self.0, plane) } } /// Length in bytes of one row for the given plane. pub fn plane_stride(&self, plane: usize) -> u32 { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_get_stride_for_plane(self.0, plane) } } /// Exports a new dmabuf/prime file descriptor. pub fn export(&self) -> RutabagaResult { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. match unsafe { gbm_bo_get_fd(self.0) } { fd if fd >= 0 => { + // SAFETY: fd is expected to be valid. let dmabuf = unsafe { File::from_raw_descriptor(fd) }; Ok(dmabuf) } @@ -246,6 +271,7 @@ impl MinigbmBuffer { impl Drop for MinigbmBuffer { fn drop(&mut self) { + // SAFETY: // This is always safe to call with a valid gbm_bo pointer. unsafe { gbm_bo_destroy(self.0) } } diff --git a/rutabaga_gfx/src/rutabaga_gralloc/rendernode.rs b/rutabaga_gfx/src/rutabaga_gralloc/rendernode.rs index e7863fbc3c..4ff5f4e880 100644 --- a/rutabaga_gfx/src/rutabaga_gralloc/rendernode.rs +++ b/rutabaga_gfx/src/rutabaga_gralloc/rendernode.rs @@ -66,6 +66,8 @@ fn get_drm_device_name(fd: &File) -> RutabagaResult { desc: null_mut(), }; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] // Get the length of the device name. unsafe { drm_get_version(fd.as_raw_fd(), &mut version)?; @@ -85,6 +87,7 @@ fn get_drm_device_name(fd: &File) -> RutabagaResult { desc: null_mut(), }; + // SAFETY: // Safe as no more than name_len + 1 bytes will be written to name. 
unsafe { drm_get_version(fd.as_raw_fd(), &mut version)?; diff --git a/rutabaga_gfx/src/rutabaga_os/descriptor.rs b/rutabaga_gfx/src/rutabaga_os/descriptor.rs index 9e80e636f8..abeb247b0f 100644 --- a/rutabaga_gfx/src/rutabaga_os/descriptor.rs +++ b/rutabaga_gfx/src/rutabaga_os/descriptor.rs @@ -75,6 +75,9 @@ impl IntoRawDescriptor for SafeDescriptor { } impl FromRawDescriptor for SafeDescriptor { + /// # Safety + /// Safe only if the caller ensures nothing has access to the descriptor after passing it to + /// `from_raw_descriptor` unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { SafeDescriptor { descriptor } } @@ -94,6 +97,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor { /// TODO(b/191800567): this API has sharp edges on Windows. We should evaluate making some /// adjustments to smooth those edges. fn try_from(rd: &dyn AsRawDescriptor) -> std::result::Result { + // SAFETY: // Safe because the underlying raw descriptor is guaranteed valid by rd's existence. // // Note that we are cloning the underlying raw descriptor since we have no guarantee of @@ -112,6 +116,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor { impl From for SafeDescriptor { fn from(f: File) -> SafeDescriptor { + // SAFETY: // Safe because we own the File at this point. unsafe { SafeDescriptor::from_raw_descriptor(f.into_raw_descriptor()) } } diff --git a/rutabaga_gfx/src/rutabaga_os/shm.rs b/rutabaga_gfx/src/rutabaga_os/shm.rs index be6a3326d6..7aaeeb8b05 100644 --- a/rutabaga_gfx/src/rutabaga_os/shm.rs +++ b/rutabaga_gfx/src/rutabaga_os/shm.rs @@ -42,6 +42,7 @@ impl IntoRawDescriptor for SharedMemory { impl From for SafeDescriptor { fn from(sm: SharedMemory) -> SafeDescriptor { + // SAFETY: // Safe because we own the SharedMemory at this point. 
unsafe { SafeDescriptor::from_raw_descriptor(sm.into_raw_descriptor()) } } diff --git a/rutabaga_gfx/src/rutabaga_os/sys/linux/descriptor.rs b/rutabaga_gfx/src/rutabaga_os/sys/linux/descriptor.rs index 963f8f1982..f286ae1169 100644 --- a/rutabaga_gfx/src/rutabaga_os/sys/linux/descriptor.rs +++ b/rutabaga_gfx/src/rutabaga_os/sys/linux/descriptor.rs @@ -27,6 +27,7 @@ pub type RawDescriptor = RawFd; /// `fd`. The cloned fd will have the `FD_CLOEXEC` flag set but will not share any other file /// descriptor flags with `fd`. fn clone_fd(fd: &dyn AsRawFd) -> Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) }; if ret < 0 { @@ -38,6 +39,8 @@ fn clone_fd(fd: &dyn AsRawFd) -> Result { impl Drop for SafeDescriptor { fn drop(&mut self) { + // SAFETY: + // Safe because we own the SafeDescriptor at this point. let _ = unsafe { libc::close(self.descriptor) }; } } @@ -62,6 +65,7 @@ impl SafeDescriptor { /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will /// share the same underlying count within the kernel. pub fn try_clone(&self) -> Result { + // SAFETY: // Safe because this doesn't modify any memory and we check the return value. let descriptor = unsafe { libc::fcntl(self.descriptor, libc::F_DUPFD_CLOEXEC, 0) }; if descriptor < 0 { @@ -74,6 +78,7 @@ impl SafeDescriptor { impl From for File { fn from(s: SafeDescriptor) -> File { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { File::from_raw_fd(s.into_raw_descriptor()) } } @@ -108,6 +113,9 @@ macro_rules! AsRawDescriptor { macro_rules! 
FromRawDescriptor { ($name:ident) => { impl FromRawDescriptor for $name { + // SAFETY: + // It is caller's responsibility to ensure that the descriptor is valid and + // stays valid for the lifetime of Self unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { $name::from_raw_fd(descriptor) } diff --git a/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs b/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs index 73e5db169c..7eeb33f404 100644 --- a/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs +++ b/rutabaga_gfx/src/rutabaga_os/sys/linux/memory_mapping.rs @@ -28,6 +28,7 @@ pub struct MemoryMapping { impl Drop for MemoryMapping { fn drop(&mut self) { + // SAFETY: // This is safe because we mmap the area at addr ourselves, and nobody // else is holding a reference to it. unsafe { @@ -51,6 +52,8 @@ impl MemoryMapping { }; if let Some(non_zero_size) = non_zero_opt { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let addr = unsafe { mmap( None, diff --git a/rutabaga_gfx/src/rutabaga_os/sys/windows/descriptor.rs b/rutabaga_gfx/src/rutabaga_os/sys/windows/descriptor.rs index 2b15bdb52e..99096f0981 100644 --- a/rutabaga_gfx/src/rutabaga_os/sys/windows/descriptor.rs +++ b/rutabaga_gfx/src/rutabaga_os/sys/windows/descriptor.rs @@ -33,6 +33,7 @@ pub type RawDescriptor = RawHandle; impl Drop for SafeDescriptor { fn drop(&mut self) { + // SAFETY: Safe because we own the descriptor. unsafe { CloseHandle(self.descriptor) }; } } @@ -48,7 +49,7 @@ pub fn duplicate_handle_from_source_process( hndl: RawHandle, target_process_handle: RawHandle, ) -> io::Result { - // Safe because: + // SAFETY: Safe because: // 1. We are checking the return code // 2. new_handle_ptr points to a valid location on the stack // 3. Caller guarantees hndl is a real valid handle. 
@@ -76,8 +77,9 @@ fn duplicate_handle_with_target_handle( hndl: RawHandle, target_process_handle: RawHandle, ) -> io::Result { - // Safe because `GetCurrentProcess` just gets the current process handle. duplicate_handle_from_source_process( + // SAFETY: + // Safe because `GetCurrentProcess` just gets the current process handle. unsafe { GetCurrentProcess() }, hndl, target_process_handle, @@ -85,6 +87,7 @@ fn duplicate_handle_with_target_handle( } pub fn duplicate_handle(hndl: RawHandle) -> io::Result { + // SAFETY: // Safe because `GetCurrentProcess` just gets the current process handle. duplicate_handle_with_target_handle(hndl, unsafe { GetCurrentProcess() }) } @@ -103,21 +106,26 @@ impl SafeDescriptor { /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will /// share the same underlying count within the kernel. pub fn try_clone(&self) -> Result { + // SAFETY: // Safe because `duplicate_handle` will return a valid handle, or at the very least error // out. Ok(unsafe { SafeDescriptor::from_raw_descriptor(duplicate_handle(self.descriptor)?) }) } } +// SAFETY: // On Windows, RawHandles are represented by raw pointers but are not used as such in // rust code, and are therefore safe to send between threads. unsafe impl Send for SafeDescriptor {} +// SAFETY: See safety comments for impl Send unsafe impl Sync for SafeDescriptor {} +// SAFETY: // On Windows, RawHandles are represented by raw pointers but are opaque to the // userspace and cannot be derefenced by rust code, and are therefore safe to // send between threads. unsafe impl Send for Descriptor {} +// SAFETY: See safety comments for impl Send unsafe impl Sync for Descriptor {} macro_rules! AsRawDescriptor { @@ -133,6 +141,7 @@ macro_rules! AsRawDescriptor { macro_rules! FromRawDescriptor { ($name:ident) => { impl FromRawDescriptor for $name { + // SAFETY: It is caller's responsibility to ensure that the descriptor is valid. 
unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self { return $name::from_raw_handle(descriptor); } diff --git a/rutabaga_gfx/src/rutabaga_utils.rs b/rutabaga_gfx/src/rutabaga_utils.rs index c8215d119f..dda10b3c46 100644 --- a/rutabaga_gfx/src/rutabaga_utils.rs +++ b/rutabaga_gfx/src/rutabaga_utils.rs @@ -44,7 +44,10 @@ pub struct RutabagaIovec { pub len: usize, } +// SAFETY: trivially safe unsafe impl Send for RutabagaIovec {} + +// SAFETY: trivially safe unsafe impl Sync for RutabagaIovec {} /// 3D resource creation parameters. Also used to create 2D resource. Constants based on Mesa's @@ -149,10 +152,15 @@ pub struct RutabagaDebug { pub message: *const c_char, } +// SAFETY: // This is sketchy, since `message` is a C-string and there's no locking + atomics. However, // the current use case is to mirror the C-API. If the `RutabagaDebugHandler` is used with // by Rust code, a different struct should be used. unsafe impl Send for RutabagaDebug {} +// SAFETY: +// This is sketchy, since `message` is a C-string and there's no locking + atomics. However, +// the current use case is to mirror the C-API. If the `RutabagaDebugHandler` is used with +// by Rust code, a different struct should be used. 
unsafe impl Sync for RutabagaDebug {} /// Mapped memory caching flags (see virtio_gpu spec) diff --git a/rutabaga_gfx/src/virgl_renderer.rs b/rutabaga_gfx/src/virgl_renderer.rs index b6a73ee298..976920a4f0 100644 --- a/rutabaga_gfx/src/virgl_renderer.rs +++ b/rutabaga_gfx/src/virgl_renderer.rs @@ -56,6 +56,7 @@ fn import_resource(resource: &mut RutabagaResource) -> RutabagaResult<()> { if let Some(handle) = &resource.handle { if handle.handle_type == RUTABAGA_MEM_HANDLE_TYPE_DMABUF { let dmabuf_fd = handle.os_handle.try_clone()?.into_raw_descriptor(); + // SAFETY: // Safe because we are being passed a valid fd unsafe { let dmabuf_size = libc::lseek64(dmabuf_fd, 0, libc::SEEK_END); @@ -95,6 +96,7 @@ impl RutabagaContext for VirglRendererContext { return Err(RutabagaError::InvalidCommandSize(commands.len())); } let dword_count = (commands.len() / size_of::()) as i32; + // SAFETY: // Safe because the context and buffer are valid and virglrenderer will have been // initialized if there are Context instances. let ret = unsafe { @@ -113,6 +115,7 @@ impl RutabagaContext for VirglRendererContext { Err(e) => error!("importing resource failing with {}", e), } + // SAFETY: // The context id and resource id must be valid because the respective instances ensure // their lifetime. unsafe { @@ -121,6 +124,7 @@ impl RutabagaContext for VirglRendererContext { } fn detach(&mut self, resource: &RutabagaResource) { + // SAFETY: // The context id and resource id must be valid because the respective instances ensure // their lifetime. unsafe { @@ -138,6 +142,8 @@ impl RutabagaContext for VirglRendererContext { // this assumption. 
let flags: u32 = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { virgl_renderer_context_create_fence( fence.ctx_id, @@ -152,6 +158,7 @@ impl RutabagaContext for VirglRendererContext { impl Drop for VirglRendererContext { fn drop(&mut self) { + // SAFETY: // The context is safe to destroy because nothing else can be referencing it. unsafe { virgl_renderer_context_destroy(self.ctx_id); @@ -163,6 +170,8 @@ extern "C" fn debug_callback(fmt: *const ::std::os::raw::c_char, ap: stdio::va_l const BUF_LEN: usize = 256; let mut v = [b' '; BUF_LEN]; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let printed_len = unsafe { let ptr = v.as_mut_ptr() as *mut ::std::os::raw::c_char; #[cfg(any( @@ -193,6 +202,8 @@ extern "C" fn debug_callback(fmt: *const ::std::os::raw::c_char, ap: stdio::va_l extern "C" fn write_context_fence(cookie: *mut c_void, ctx_id: u32, ring_idx: u32, fence_id: u64) { catch_unwind(|| { assert!(!cookie.is_null()); + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let cookie = unsafe { &*(cookie as *mut RutabagaCookie) }; // Call fence completion callback @@ -208,6 +219,8 @@ extern "C" fn write_context_fence(cookie: *mut c_void, ctx_id: u32, ring_idx: u3 .unwrap_or_else(|_| abort()) } +// TODO(b/315870313): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe extern "C" fn write_fence(cookie: *mut c_void, fence: u32) { catch_unwind(|| { assert!(!cookie.is_null()); @@ -226,6 +239,8 @@ unsafe extern "C" fn write_fence(cookie: *mut c_void, fence: u32) { .unwrap_or_else(|_| abort()) } +// TODO(b/315870313): Add safety comment +#[allow(clippy::undocumented_unsafe_blocks)] unsafe extern "C" fn get_server_fd(cookie: *mut c_void, version: u32) -> c_int { catch_unwind(|| { assert!(!cookie.is_null()); @@ -267,8 +282,9 @@ fn export_query(resource_id: u32) -> RutabagaResult { 
query.in_resource_id = resource_id; query.in_export_fds = 0; - // Safe because the image parameters are stack variables of the correct type. let ret = + // SAFETY: + // Safe because the image parameters are stack variables of the correct type. unsafe { virgl_renderer_execute(&mut query as *mut _ as *mut c_void, query.hdr.size) }; ret_to_res(ret)?; @@ -282,6 +298,8 @@ impl VirglRenderer { render_server_fd: Option, ) -> RutabagaResult> { if cfg!(debug_assertions) { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { libc::dup2(libc::STDOUT_FILENO, libc::STDERR_FILENO) }; if ret == -1 { warn!( @@ -302,7 +320,11 @@ impl VirglRenderer { return Err(RutabagaError::AlreadyInUse); } - unsafe { virgl_set_debug_callback(Some(debug_callback)) }; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + virgl_set_debug_callback(Some(debug_callback)) + }; // Cookie is intentionally never freed because virglrenderer never gets uninitialized. // Otherwise, Resource and Context would become invalid because their lifetime is not tied @@ -314,6 +336,7 @@ impl VirglRenderer { debug_handler: None, })); + // SAFETY: // Safe because a valid cookie and set of callbacks is used and the result is checked for // error. 
let ret = unsafe { @@ -330,6 +353,8 @@ impl VirglRenderer { fn map_info(&self, resource_id: u32) -> RutabagaResult { let mut map_info = 0; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { virgl_renderer_resource_get_map_info(resource_id, &mut map_info) }; ret_to_res(ret)?; @@ -357,10 +382,13 @@ impl VirglRenderer { fn export_blob(&self, resource_id: u32) -> RutabagaResult> { let mut fd_type = 0; let mut fd = 0; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { virgl_renderer_resource_export_blob(resource_id, &mut fd_type, &mut fd) }; ret_to_res(ret)?; + // SAFETY: // Safe because the FD was just returned by a successful virglrenderer // call so it must be valid and owned by us. let handle = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; @@ -383,6 +411,7 @@ impl VirglRenderer { impl Drop for VirglRenderer { fn drop(&mut self) { + // SAFETY: // Safe because virglrenderer is initialized. // // This invalidates all context ids and resource ids. It is fine because struct Rutabaga @@ -398,6 +427,7 @@ impl RutabagaComponent for VirglRenderer { fn get_capset_info(&self, capset_id: u32) -> (u32, u32) { let mut version = 0; let mut size = 0; + // SAFETY: // Safe because virglrenderer is initialized by now and properly size stack variables are // used for the pointers. unsafe { @@ -409,6 +439,7 @@ impl RutabagaComponent for VirglRenderer { fn get_capset(&self, capset_id: u32, version: u32) -> Vec { let (_, max_size) = self.get_capset_info(capset_id); let mut buf = vec![0u8; max_size as usize]; + // SAFETY: // Safe because virglrenderer is initialized by now and the given buffer is sized properly // for the given cap id/version. 
unsafe { @@ -418,19 +449,30 @@ impl RutabagaComponent for VirglRenderer { } fn force_ctx_0(&self) { - unsafe { virgl_renderer_force_ctx_0() }; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + virgl_renderer_force_ctx_0() + }; } fn create_fence(&mut self, fence: RutabagaFence) -> RutabagaResult<()> { + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { virgl_renderer_create_fence(fence.fence_id as i32, fence.ctx_id) }; ret_to_res(ret) } fn event_poll(&self) { - unsafe { virgl_renderer_poll() }; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + virgl_renderer_poll() + }; } fn poll_descriptor(&self) -> Option { + // SAFETY: // Safe because it can be called anytime and returns -1 in the event of an error. let fd = unsafe { virgl_renderer_get_poll_fd() }; if fd >= 0 { @@ -460,6 +502,7 @@ impl RutabagaComponent for VirglRenderer { flags: resource_create_3d.flags, }; + // SAFETY: // Safe because virglrenderer is initialized by now, and the return value is checked before // returning a new resource. The backing buffers are not supplied with this call. let ret = unsafe { virgl_renderer_resource_create(&mut args, null_mut(), 0) }; @@ -487,6 +530,7 @@ impl RutabagaComponent for VirglRenderer { resource_id: u32, vecs: &mut Vec, ) -> RutabagaResult<()> { + // SAFETY: // Safe because the backing is into guest memory that we store a reference count for. let ret = unsafe { virgl_renderer_resource_attach_iov( @@ -499,6 +543,7 @@ impl RutabagaComponent for VirglRenderer { } fn detach_backing(&self, resource_id: u32) { + // SAFETY: // Safe as we don't need the old backing iovecs returned and the reference to the guest // memory can be dropped as it will no longer be needed for this resource. 
unsafe { @@ -507,6 +552,7 @@ impl RutabagaComponent for VirglRenderer { } fn unref_resource(&self, resource_id: u32) { + // SAFETY: // The resource is safe to unreference destroy because no user of these bindings can still // be holding a reference. unsafe { @@ -533,6 +579,7 @@ impl RutabagaComponent for VirglRenderer { d: transfer.d, }; + // SAFETY: // Safe because only stack variables of the appropriate type are used. let ret = unsafe { virgl_renderer_transfer_write_iov( @@ -584,6 +631,7 @@ impl RutabagaComponent for VirglRenderer { None => (null_mut(), 0), }; + // SAFETY: // Safe because only stack variables of the appropriate type are used. let ret = unsafe { virgl_renderer_transfer_read_iov( @@ -628,6 +676,8 @@ impl RutabagaComponent for VirglRenderer { num_iovs: num_iovecs as u32, }; + // TODO(b/315870313): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ret = unsafe { virgl_renderer_resource_create_blob(&resource_create_args) }; ret_to_res(ret)?; @@ -653,6 +703,7 @@ impl RutabagaComponent for VirglRenderer { fn map(&self, resource_id: u32) -> RutabagaResult { let mut map: *mut c_void = null_mut(); let mut size: u64 = 0; + // SAFETY: // Safe because virglrenderer wraps and validates use of GL/VK. let ret = unsafe { virgl_renderer_resource_map(resource_id, &mut map, &mut size) }; if ret != 0 { @@ -666,6 +717,7 @@ impl RutabagaComponent for VirglRenderer { } fn unmap(&self, resource_id: u32) -> RutabagaResult<()> { + // SAFETY: // Safe because virglrenderer is initialized by now. let ret = unsafe { virgl_renderer_resource_unmap(resource_id) }; ret_to_res(ret) @@ -675,11 +727,13 @@ impl RutabagaComponent for VirglRenderer { fn export_fence(&self, fence_id: u64) -> RutabagaResult { #[cfg(feature = "virgl_renderer_next")] { - // Safe because the parameters are stack variables of the correct type. let mut fd: i32 = 0; + // SAFETY: + // Safe because the parameters are stack variables of the correct type. 
let ret = unsafe { virgl_renderer_export_fence(fence_id, &mut fd) }; ret_to_res(ret)?; + // SAFETY: // Safe because the FD was just returned by a successful virglrenderer call so it must // be valid and owned by us. let fence = unsafe { SafeDescriptor::from_raw_descriptor(fd) }; @@ -705,6 +759,7 @@ impl RutabagaComponent for VirglRenderer { name = name_string; } + // SAFETY: // Safe because virglrenderer is initialized by now and the context name is statically // allocated. The return value is checked before returning a new context. let ret = unsafe { diff --git a/src/crosvm/plugin/mod.rs b/src/crosvm/plugin/mod.rs index ad9d132263..9c33c38bd7 100644 --- a/src/crosvm/plugin/mod.rs +++ b/src/crosvm/plugin/mod.rs @@ -116,6 +116,7 @@ pub enum CommError { fn new_seqpacket_pair() -> SysResult<(UnixDatagram, UnixDatagram)> { let mut fds = [0, 0]; + // SAFETY: trivially safe as we check the return value unsafe { let ret = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds.as_mut_ptr()); if ret == 0 { @@ -143,6 +144,7 @@ fn new_pipe_pair() -> SysResult { // Increasing the pipe size can be a nice-to-have to make sure that // messages get across atomically (and made sure that writes don't block), // though it's not necessary a hard requirement for things to work. + // SAFETY: safe because no memory is modified and we check return value. let flags = unsafe { fcntl( to_crosvm.0.as_raw_descriptor(), @@ -157,6 +159,7 @@ fn new_pipe_pair() -> SysResult { SysError::last() ); } + // SAFETY: safe because no memory is modified and we check return value. let flags = unsafe { fcntl( to_plugin.0.as_raw_descriptor(), @@ -279,9 +282,10 @@ pub fn run_vcpus( // SIGRTMIN each time it runs the VM, so this mode should be avoided. if use_kvm_signals { + // SAFETY: + // Our signal handler does nothing and is trivially async signal safe. unsafe { extern "C" fn handle_signal(_: c_int) {} - // Our signal handler does nothing and is trivially async signal safe. 
// We need to install this signal handler even though we do block // the signal below, to ensure that this signal will interrupt // execution of KVM_RUN (this is implementation issue). @@ -291,6 +295,7 @@ pub fn run_vcpus( // We do not really want the signal handler to run... block_signal(SIGRTMIN() + 0).expect("failed to block signal"); } else { + // SAFETY: trivially safe as we check return value. unsafe { extern "C" fn handle_signal(_: c_int) { Vcpu::set_local_immediate_exit(true); @@ -585,6 +590,7 @@ pub fn run_config(cfg: Config) -> Result<()> { tap_interfaces.push(tap); } NetParametersMode::TapFd { tap_fd, mac } => { + // SAFETY: // Safe because we ensure that we get a unique handle to the fd. let tap = unsafe { Tap::from_raw_descriptor( diff --git a/src/crosvm/plugin/process.rs b/src/crosvm/plugin/process.rs index 36e2893d1b..798a2650d0 100644 --- a/src/crosvm/plugin/process.rs +++ b/src/crosvm/plugin/process.rs @@ -305,6 +305,7 @@ impl Process { /// Waits without blocking for the plugin process to exit and returns the status. pub fn try_wait(&mut self) -> SysResult { let mut status = 0; + // SAFETY: // Safe because waitpid is given a valid pointer of correct size and mutability, and the // return value is checked. let ret = unsafe { waitpid(self.plugin_pid, &mut status, WNOHANG) }; @@ -649,6 +650,7 @@ impl Process { response_fds.push(self.kill_evt.as_raw_descriptor()); Ok(()) } else if request.has_check_extension() { + // SAFETY: // Safe because the Cap enum is not read by the check_extension method. In that method, // cap is cast back to an integer and fed to an ioctl. 
If the extension name is actually // invalid, the kernel will safely reject the extension under the assumption that the diff --git a/src/crosvm/plugin/vcpu.rs b/src/crosvm/plugin/vcpu.rs index ad83055721..3b7ec51471 100644 --- a/src/crosvm/plugin/vcpu.rs +++ b/src/crosvm/plugin/vcpu.rs @@ -681,15 +681,17 @@ impl PluginVcpu { Layout::from_size_align(size, ALIGN_OF_MSRS).expect("impossible layout"); let mut allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Safe to obtain an exclusive reference because there are no other // references to the allocation yet and all-zero is a valid bit // pattern. let kvm_msrs = unsafe { allocation.as_mut::() }; + // SAFETY: + // Mapping the unsized array to a slice is unsafe becase the length isn't known. + // Providing the length used to create the struct guarantees the entire slice is + // valid. unsafe { - // Mapping the unsized array to a slice is unsafe becase the length isn't known. - // Providing the length used to create the struct guarantees the entire slice is - // valid. let kvm_msr_entries: &mut [kvm_msr_entry] = kvm_msrs.entries.as_mut_slice(request_entries.len()); for (msr_entry, entry) in kvm_msr_entries.iter_mut().zip(request_entries) { @@ -728,6 +730,7 @@ impl PluginVcpu { cap: capability, ..Default::default() }; + // SAFETY: // Safe because the allowed capabilities don't take pointer arguments. unsafe { vcpu.kvm_enable_cap(&cap) } } diff --git a/src/crosvm/sys/linux.rs b/src/crosvm/sys/linux.rs index a510b35072..1532061c77 100644 --- a/src/crosvm/sys/linux.rs +++ b/src/crosvm/sys/linux.rs @@ -681,8 +681,10 @@ fn create_devices( if !coiommu_attached_endpoints.is_empty() || !iommu_attached_endpoints.is_empty() { let mut buf = mem::MaybeUninit::::zeroed(); + // SAFETY: trivially safe let res = unsafe { libc::getrlimit64(libc::RLIMIT_MEMLOCK, buf.as_mut_ptr()) }; if res == 0 { + // SAFETY: safe because getrlimit64 has returned success. 
let limit = unsafe { buf.assume_init() }; let rlim_new = limit.rlim_cur.saturating_add(vm.get_memory().memory_size()); let rlim_max = max(limit.rlim_max, rlim_new); @@ -691,6 +693,7 @@ fn create_devices( rlim_cur: rlim_new, rlim_max, }; + // SAFETY: trivially safe let res = unsafe { libc::setrlimit64(libc::RLIMIT_MEMLOCK, &limit_arg) }; if res != 0 { bail!("Set rlimit failed"); @@ -2215,6 +2218,7 @@ fn add_hotplug_device( endpoint_addr, wrapper_id: vfio_wrapper.id(), container: { + // SAFETY: // Safe because the descriptor is uniquely owned by `descriptor`. unsafe { File::from_raw_descriptor(descriptor) } }, @@ -4100,6 +4104,7 @@ fn jail_and_start_vu_device( keep_rds.sort_unstable(); keep_rds.dedup(); + // SAFETY: // Safe because we are keeping all the descriptors needed for the child to function. match unsafe { jail.fork(Some(&keep_rds)).context("error while forking")? } { 0 => { @@ -4110,6 +4115,7 @@ fn jail_and_start_vu_device( let _ = std::mem::ManuallyDrop::new(parent_resources); // Make sure the child process does not survive its parent. + // SAFETY: trivially safe if unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGKILL) } < 0 { panic!("call to prctl(PR_SET_DEATHSIG, SIGKILL) failed. Aborting child process."); } @@ -4118,6 +4124,7 @@ fn jail_and_start_vu_device( const MAX_LEN: usize = 15; // pthread_setname_np() limit on Linux let debug_label_trimmed = &name.as_bytes()[..std::cmp::min(MAX_LEN, name.len())]; let thread_name = CString::new(debug_label_trimmed).unwrap(); + // SAFETY: // Safe because we trimmed the name to 15 characters (and pthread_setname_np will return // an error if we don't anyway). 
let _ = unsafe { libc::pthread_setname_np(libc::pthread_self(), thread_name.as_ptr()) }; @@ -4133,6 +4140,7 @@ fn jail_and_start_vu_device( 1 } }; + // SAFETY: trivially safe unsafe { libc::exit(res) }; } pid => { diff --git a/src/crosvm/sys/linux/config.rs b/src/crosvm/sys/linux/config.rs index 0ff348fdce..f8a7515b82 100644 --- a/src/crosvm/sys/linux/config.rs +++ b/src/crosvm/sys/linux/config.rs @@ -106,7 +106,9 @@ impl Default for SharedDir { tag: Default::default(), kind: Default::default(), ugid: (None, None), + // SAFETY: trivially safe uid_map: format!("0 {} 1", unsafe { geteuid() }), + // SAFETY: trivially safe gid_map: format!("0 {} 1", unsafe { getegid() }), fs_cfg: Default::default(), p9_cfg: Default::default(), diff --git a/src/crosvm/sys/linux/device_helpers.rs b/src/crosvm/sys/linux/device_helpers.rs index df9e700654..e82ffd32bf 100644 --- a/src/crosvm/sys/linux/device_helpers.rs +++ b/src/crosvm/sys/linux/device_helpers.rs @@ -744,6 +744,7 @@ fn create_tap_for_net_device( Ok((tap, *mac)) } NetParametersMode::TapFd { tap_fd, mac } => { + // SAFETY: // Safe because we ensure that we get a unique handle to the fd. let tap = unsafe { Tap::from_raw_descriptor( diff --git a/src/crosvm/sys/linux/vcpu.rs b/src/crosvm/sys/linux/vcpu.rs index 12adb86151..cafe4736d4 100644 --- a/src/crosvm/sys/linux/vcpu.rs +++ b/src/crosvm/sys/linux/vcpu.rs @@ -190,6 +190,7 @@ fn set_vcpu_thread_local(vcpu: Option<&dyn VcpuArch>, signal_num: c_int) { } pub fn setup_vcpu_signal_handler() -> Result<()> { + // SAFETY: trivially safe as we check return value. 
unsafe { extern "C" fn handle_signal(_: c_int) { // Use LocalKey::try_with() so we don't panic if a signal happens while the destructor diff --git a/src/crosvm/sys/windows/broker.rs b/src/crosvm/sys/windows/broker.rs index d6be6de638..8a93be8b51 100644 --- a/src/crosvm/sys/windows/broker.rs +++ b/src/crosvm/sys/windows/broker.rs @@ -343,6 +343,8 @@ impl Child for SandboxedChild { } fn kill(&mut self) -> std::io::Result<()> { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] if unsafe { TerminateProcess(self.0.as_raw_descriptor(), KILL_CHILD_EXIT_CODE) == 0 } { Err(std::io::Error::last_os_error()) } else { diff --git a/src/sys/linux/panic_hook.rs b/src/sys/linux/panic_hook.rs index 6652cf65b8..154a7fd10a 100644 --- a/src/sys/linux/panic_hook.rs +++ b/src/sys/linux/panic_hook.rs @@ -25,8 +25,8 @@ use libc::STDERR_FILENO; // the pipe and the old stderr as a pair of files. fn redirect_stderr() -> Option<(File, File)> { let mut fds = [-1, -1]; + // SAFETY: Trivially safe because the return value is checked. unsafe { - // Trivially safe because the return value is checked. let old_stderr = dup(STDERR_FILENO); if old_stderr == -1 { return None; @@ -57,6 +57,7 @@ fn redirect_stderr() -> Option<(File, File)> { fn restore_stderr(stderr: File) -> bool { let descriptor = stderr.into_raw_descriptor(); + // SAFETY: // Safe because descriptor is guaranteed to be valid and replacing stderr // should be an atomic operation. unsafe { dup2(descriptor, STDERR_FILENO) != -1 } diff --git a/src/sys/windows.rs b/src/sys/windows.rs index 22695d3d68..65b6ea3437 100644 --- a/src/sys/windows.rs +++ b/src/sys/windows.rs @@ -2221,9 +2221,10 @@ fn set_tsc_clock_snapshot() { /// Launches run_config for the broker, reading configuration from a TubeTransporter. 
pub fn run_config_for_broker(raw_tube_transporter: RawDescriptor) -> Result { - // Safe because we know that raw_transport_tube is valid (passed by inheritance), and that - // the blocking & framing modes are accurate because we create them ourselves in the broker. let tube_transporter = + // SAFETY: + // Safe because we know that raw_transport_tube is valid (passed by inheritance), and that + // the blocking & framing modes are accurate because we create them ourselves in the broker. unsafe { TubeTransporterReader::from_raw_descriptor(raw_tube_transporter) }; let mut tube_data_list = tube_transporter diff --git a/src/sys/windows/main.rs b/src/sys/windows/main.rs index 6320dbd512..c4fa44899c 100644 --- a/src/sys/windows/main.rs +++ b/src/sys/windows/main.rs @@ -46,10 +46,11 @@ use crate::Config; pub(crate) fn run_slirp(args: RunSlirpCommand) -> Result<()> { let raw_transport_tube = args.bootstrap as RawDescriptor; - // Safe because we know that raw_transport_tube is valid (passed by inheritance), - // and that the blocking & framing modes are accurate because we create them ourselves - // in the broker. let tube_transporter = + // SAFETY: + // Safe because we know that raw_transport_tube is valid (passed by inheritance), + // and that the blocking & framing modes are accurate because we create them ourselves + // in the broker. unsafe { TubeTransporterReader::from_raw_descriptor(raw_transport_tube) }; let mut tube_data_list = tube_transporter diff --git a/swap/src/controller.rs b/swap/src/controller.rs index cf04e48ec2..aa312a071a 100644 --- a/swap/src/controller.rs +++ b/swap/src/controller.rs @@ -635,6 +635,7 @@ fn monitor_process( }; // TODO(b/272634283): Should just disable vmm-swap without crash. + // SAFETY: // Safe because the regions are from guest memory and uffd_list contains all // the processes of crosvm. 
unsafe { register_regions(®ions, uffd_list.get_list()) } @@ -812,6 +813,7 @@ fn move_guest_to_staging( let mut pages = 0; let result = guest_memory.regions().try_for_each(|region| { + // SAFETY: // safe because: // * all the regions are registered to all userfaultfd // * no process access the guest memory @@ -973,11 +975,11 @@ fn handle_vmm_swap<'scope, 'env>( { Command::ProcessForked { uffd, reply_tube } => { debug!("new fork uffd: {:?}", uffd); - // SAFETY: regions is generated from the guest memory - // SAFETY: the uffd is from a new process. - let result = if let Err(e) = + let result = if let Err(e) = { + // SAFETY: regions is generated from the guest memory + // SAFETY: the uffd is from a new process. unsafe { register_regions(regions, std::array::from_ref(&uffd)) } - { + } { error!("failed to setup uffd: {:?}", e); false } else { diff --git a/swap/src/file.rs b/swap/src/file.rs index e4bcf78c14..2ae1909e5e 100644 --- a/swap/src/file.rs +++ b/swap/src/file.rs @@ -728,6 +728,8 @@ mod tests { swap_file.write_to_file(0, data).unwrap(); let page = swap_file.page_content(0, false).unwrap().unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let result = unsafe { slice::from_raw_parts(page.as_ptr(), pagesize()) }; assert_eq!(result, data); } @@ -746,6 +748,8 @@ mod tests { fn assert_page_content(swap_file: &SwapFile, idx: usize, data: &[u8]) { let page = swap_file.page_content(idx, false).unwrap().unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let result = unsafe { slice::from_raw_parts(page.as_ptr(), pagesize()) }; assert_eq!(result, data); } @@ -903,6 +907,8 @@ mod tests { swap_file.clear_range(0..1).unwrap(); let slice = swap_file.page_content(0, true).unwrap().unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let slice = unsafe { slice::from_raw_parts(slice.as_ptr(), slice.size()) }; assert_eq!(slice, data); } 
diff --git a/swap/src/page_handler.rs b/swap/src/page_handler.rs index 4e603bf761..0104cd1f7a 100644 --- a/swap/src/page_handler.rs +++ b/swap/src/page_handler.rs @@ -79,6 +79,7 @@ pub enum Error { /// The memory range must be on the guest memory. #[deny(unsafe_op_in_unsafe_fn)] unsafe fn remove_memory(addr: usize, len: usize) -> std::result::Result<(), base::Error> { + // SAFETY: // Safe because the caller guarantees addr is in guest memory, so this does not affect any rust // managed memory. let ret = unsafe { libc::madvise(addr as *mut libc::c_void, len, libc::MADV_REMOVE) }; @@ -140,6 +141,7 @@ impl Task for MoveToStaging { copy_op.execute(); } // Remove chunks of pages at once to reduce madvise(2) syscall. + // SAFETY: // Safe because the region is already backed by the file and the content will be // swapped in on a page fault. let result = unsafe { @@ -469,6 +471,7 @@ impl<'a> PageHandler<'a> { let size = (data_range.end - data_range.start) as usize; assert!(is_page_aligned(size)); + // SAFETY: // Safe because: // * src_addr is aligned with page size // * the data_range starting from src_addr is on the guest memory. @@ -537,6 +540,7 @@ impl<'a> PageHandler<'a> { let pages = idx_range.end - idx_range.start; let slice = region.staging_memory.get_slice(idx_range.clone())?; // Convert VolatileSlice to &[u8] + // SAFETY: // Safe because the range of volatile slice is already validated. let slice = unsafe { std::slice::from_raw_parts(slice.as_ptr(), slice.size()) }; file.write_to_file(idx_range_in_file.start, slice)?; diff --git a/swap/src/processes.rs b/swap/src/processes.rs index e3a4786411..0d89169990 100644 --- a/swap/src/processes.rs +++ b/swap/src/processes.rs @@ -54,6 +54,7 @@ impl ProcessesGuard { /// Stops all the crosvm processes by sending SIGSTOP signal. fn stop_the_world(&self) -> Result<()> { for pid in &self.pids { + // SAFETY: // safe because pid in pids are crosvm processes except this monitor process. 
unsafe { kill(*pid, Signal::Stop as i32) }.context("failed to stop process")?; } @@ -66,6 +67,7 @@ impl ProcessesGuard { /// Resumes all the crosvm processes by sending SIGCONT signal. fn continue_the_world(&self) { for pid in &self.pids { + // SAFETY: // safe because pid in pids are crosvm processes except this monitor process and // continue signal does not have side effects. // ignore the result because we don't care whether it succeeds. diff --git a/swap/src/staging.rs b/swap/src/staging.rs index 51f490c99e..1384e13e2d 100644 --- a/swap/src/staging.rs +++ b/swap/src/staging.rs @@ -52,6 +52,7 @@ pub struct CopyOp { size: usize, } +/// SAFETY: /// CopyOp is safe to be sent to other threads because: /// * The source memory region (guest memory) is alive for the monitor process lifetime. /// * The destination memory region (staging memory) is alive until all the [CopyOp] are executed. @@ -61,6 +62,7 @@ unsafe impl Send for CopyOp {} impl CopyOp { /// Copies the specified the guest memory to the staging memory. pub fn execute(self) { + // SAFETY: // Safe because: // * the source memory is in guest memory and no processes access it. // * src_addr and dst_addr are aligned with the page size. 
@@ -245,6 +247,8 @@ mod tests { let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap(); let src_addr = mmap.as_ptr(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { staging_memory.copy(src_addr, 1, 4).unwrap(); // empty @@ -280,11 +284,15 @@ mod tests { let mmap = create_mmap(1, 1); let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { staging_memory.copy(mmap.as_ptr(), 0, 1).unwrap().execute(); } let page = staging_memory.page_content(0).unwrap().unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let result = unsafe { std::slice::from_raw_parts(page.as_ptr(), page.size()) }; assert_eq!(result, &vec![1; pagesize()]); } @@ -307,6 +315,8 @@ mod tests { let mmap = create_mmap(1, 5); let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { staging_memory.copy(mmap.as_ptr(), 0, 5).unwrap(); } @@ -338,6 +348,8 @@ mod tests { let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap(); let src_addr = mmap.as_ptr(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { staging_memory.copy(src_addr, 1, 2).unwrap(); staging_memory.copy(src_addr, 3, 1).unwrap(); @@ -360,6 +372,8 @@ mod tests { let src_addr1 = mmap1.as_ptr(); let src_addr2 = mmap2.as_ptr(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { staging_memory.copy(src_addr1, 1, 1).unwrap().execute(); staging_memory.copy(src_addr2, 2, 1).unwrap().execute(); @@ -399,6 +413,8 @@ mod tests { let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap(); let src_addr = mmap.as_ptr(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] 
unsafe { staging_memory.copy(src_addr, 1, 4).unwrap(); staging_memory.copy(src_addr, 12, 1).unwrap(); diff --git a/swap/src/uffd_list.rs b/swap/src/uffd_list.rs index 903986facc..22c6df9b5b 100644 --- a/swap/src/uffd_list.rs +++ b/swap/src/uffd_list.rs @@ -192,10 +192,16 @@ mod tests { self.list.borrow_mut().push((raw_desc, false)); - unsafe { Userfaultfd::from_raw_descriptor(raw_desc) } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + Userfaultfd::from_raw_descriptor(raw_desc) + } } fn make_readable(&self, raw_desc: RawDescriptor) { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let ev = unsafe { Event::from_raw_descriptor(raw_desc) }; ev.signal().unwrap(); // Keep the file descriptor opened. The generated fake Userfaultfd has the actual diff --git a/swap/src/userfaultfd.rs b/swap/src/userfaultfd.rs index d7f5778317..76a0f27e2a 100644 --- a/swap/src/userfaultfd.rs +++ b/swap/src/userfaultfd.rs @@ -103,6 +103,7 @@ impl From for Error { pub unsafe fn register_regions(regions: &[Range], uffds: &[Userfaultfd]) -> Result<()> { for address_range in regions { for uffd in uffds { + // SAFETY: // Safe because the range is from the guest memory region. let result = unsafe { uffd.register(address_range.start, address_range.end - address_range.start) @@ -188,6 +189,7 @@ impl Factory { /// Creates a new [Userfaultfd] for this process. pub fn create(&self) -> anyhow::Result { if let Some(dev_file) = &self.dev_file { + // SAFETY: // Safe because ioctl(2) USERFAULTFD_IOC_NEW with does not change Rust memory safety. let res = unsafe { ioctl_with_val( @@ -200,6 +202,7 @@ impl Factory { return errno_result().context("USERFAULTFD_IOC_NEW"); } else { // Safe because the uffd is not owned by anyone in this process. 
+ // SAFETY: unsafe { Userfaultfd::from_raw_descriptor(res) } }; let mut api = userfaultfd_sys::uffdio_api { @@ -207,6 +210,7 @@ impl Factory { features: (FeatureFlags::MISSING_SHMEM | FeatureFlags::EVENT_REMOVE).bits(), ioctls: 0, }; + // SAFETY: // Safe because ioctl(2) UFFDIO_API with does not change Rust memory safety. let res = unsafe { ioctl_with_mut_ref(&uffd, UFFDIO_API(), &mut api) }; if res < 0 { @@ -337,6 +341,7 @@ impl Userfaultfd { /// * `len` - the length in bytes of the page(s). /// * `wake` - whether or not to unblock the faulting thread. pub fn zero(&self, addr: usize, len: usize, wake: bool) -> Result { + // SAFETY: // safe because zeroing untouched pages does not break the Rust memory safety since "All // runtime-allocated memory in a Rust program begins its life as uninitialized." // https://doc.rust-lang.org/nomicon/uninitialized.html @@ -352,17 +357,20 @@ impl Userfaultfd { /// * `data` - the starting address of the content. /// * `wake` - whether or not to unblock the faulting thread. pub fn copy(&self, addr: usize, len: usize, data: *const u8, wake: bool) -> Result { - // safe because filling untouched pages with data does not break the Rust memory safety - // since "All runtime-allocated memory in a Rust program begins its life as uninitialized." - // https://doc.rust-lang.org/nomicon/uninitialized.html - Ok(unsafe { - self.uffd.copy( - data as *const libc::c_void, - addr as *mut libc::c_void, - len, - wake, - ) - }?) + Ok( + // SAFETY: + // safe because filling untouched pages with data does not break the Rust memory safety + // since "All runtime-allocated memory in a Rust program begins its life as uninitialized." + // https://doc.rust-lang.org/nomicon/uninitialized.html + unsafe { + self.uffd.copy( + data as *const libc::c_void, + addr as *mut libc::c_void, + len, + wake, + ) + }?, + ) } /// Wake the faulting thread blocked by the page(s). 
diff --git a/swap/tests/main.rs b/swap/tests/main.rs index 4cc1d038df..335623344c 100644 --- a/swap/tests/main.rs +++ b/swap/tests/main.rs @@ -29,10 +29,13 @@ mod test { let region = base_addr..(base_addr + 3 * pagesize()); let regions = [region]; let (tube_main, tube_child) = Tube::pair().unwrap(); + // SAFETY: trivially safe let pid = unsafe { libc::fork() }; if pid == 0 { // child process let uffd = create_uffd_for_test(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] tube_child .send(&unsafe { SafeDescriptor::from_raw_descriptor(uffd.as_raw_descriptor()) }) .unwrap(); @@ -43,8 +46,12 @@ mod test { .unwrap() .into_raw_descriptor(); wait_for_pid(pid, 0).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let uffd_child = unsafe { Userfaultfd::from_raw_descriptor(uffd_descriptor) }; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let result = unsafe { register_regions(®ions, &[uffd, uffd_child]) }; // no error from ENOMEM @@ -58,10 +65,13 @@ mod test { let region = base_addr..(base_addr + 3 * pagesize()); let regions = [region]; let (tube_main, tube_child) = Tube::pair().unwrap(); + // SAFETY: trivially safe let pid = unsafe { libc::fork() }; if pid == 0 { // child process let uffd = create_uffd_for_test(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] tube_child .send(&unsafe { SafeDescriptor::from_raw_descriptor(uffd.as_raw_descriptor()) }) .unwrap(); @@ -72,9 +82,13 @@ mod test { .recv::() .unwrap() .into_raw_descriptor(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] let uffd_child = unsafe { Userfaultfd::from_raw_descriptor(uffd_descriptor) }; let uffds = [uffd, uffd_child]; + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, &uffds) }.unwrap(); 
tube_main.send(&0_u8).unwrap(); // wait until the child process die and the uffd_child become obsolete. diff --git a/swap/tests/page_handler.rs b/swap/tests/page_handler.rs index 389ea7fdc9..c1091e747f 100644 --- a/swap/tests/page_handler.rs +++ b/swap/tests/page_handler.rs @@ -148,6 +148,8 @@ fn handle_page_fault_zero_success_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); page_handler.handle_page_fault(&uffd, base_addr).unwrap(); @@ -163,6 +165,7 @@ fn handle_page_fault_zero_success_impl() { let mut result = Vec::new(); for i in 0..(3 * pagesize()) { let ptr = shm.mmap.as_ptr() as usize + i; + // SAFETY: trivially safe unsafe { result.push(*(ptr as *mut u8)); } @@ -194,6 +197,8 @@ fn handle_page_fault_invalid_address_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); assert_eq!( @@ -229,6 +234,8 @@ fn handle_page_fault_duplicated_page_fault_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); assert_eq!( @@ -260,6 +267,8 @@ fn handle_page_remove_success_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); 
// fill the first page with zero @@ -267,6 +276,7 @@ fn handle_page_remove_success_impl() { // write value on another thread to avoid blocking forever let join_handle = thread::spawn(move || { let ptr = base_addr as *mut u8; + // SAFETY: trivially safe unsafe { *ptr = 1; } @@ -276,6 +286,8 @@ fn handle_page_remove_success_impl() { page_handler .handle_page_remove(base_addr, second_page_addr) .unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { libc::madvise( base_addr as *mut libc::c_void, @@ -288,6 +300,7 @@ fn handle_page_remove_success_impl() { // read value on another thread to avoid blocking forever let join_handle = thread::spawn(move || { let ptr = base_addr as *mut u8; + // SAFETY: trivially safe unsafe { *ptr } }); @@ -313,6 +326,8 @@ fn handle_page_remove_invalid_address_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); page_handler.handle_page_fault(&uffd, base_addr).unwrap(); @@ -376,6 +391,8 @@ fn move_to_staging_data_written_before_enabling_impl() { let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); // write data before registering to userfaultfd + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() { *(i as *mut u8) = 1; @@ -387,8 +404,12 @@ fn move_to_staging_data_written_before_enabling_impl() { *(i as *mut u8) = 3; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { 
page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -412,6 +433,7 @@ fn move_to_staging_data_written_before_enabling_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr1 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -420,6 +442,7 @@ fn move_to_staging_data_written_before_enabling_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr2 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -478,6 +501,8 @@ fn move_to_staging_hugepage_chunks_impl() { let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); // write data before registering to userfaultfd + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in page_idx_range(base_addr1 + pagesize(), base_addr1 + 3 * pagesize()) { *(page_idx_to_addr(i) as *mut u8) = 1; @@ -504,8 +529,12 @@ fn move_to_staging_hugepage_chunks_impl() { *(page_idx_to_addr(i) as *mut u8) = 5; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -528,12 +557,14 @@ fn move_to_staging_hugepage_chunks_impl() { let mut result = Vec::new(); for i in page_idx_range(base_addr1, base_addr1 + 5 * HUGEPAGE_SIZE) { let ptr = (page_idx_to_addr(i)) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } } for i in page_idx_range(base_addr2, base_addr2 + 5 * HUGEPAGE_SIZE) { let ptr = (page_idx_to_addr(i)) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -598,16 +629,34 @@ fn move_to_staging_invalid_base_addr_impl() { let regions = [region]; let page_handler = PageHandler::create(&file, 
&staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); // the base_addr is within the region assert_eq!( - unsafe { page_handler.move_to_staging(base_addr + pagesize(), &shm.shm, 0,) }.is_err(), + { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + page_handler + .move_to_staging(base_addr + pagesize(), &shm.shm, 0) + .is_err() + } + }, true ); // the base_addr is outside of the region assert_eq!( - unsafe { page_handler.move_to_staging(base_addr - pagesize(), &shm.shm, 0,) }.is_err(), + { + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + page_handler + .move_to_staging(base_addr - pagesize(), &shm.shm, 0) + .is_err() + } + }, true ); worker.close(); @@ -648,6 +697,8 @@ fn swap_out_success_impl() { let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); // write data before registering to userfaultfd + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() { *(i as *mut u8) = 1; @@ -656,8 +707,12 @@ fn swap_out_success_impl() { *(i as *mut u8) = 2; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -683,6 +738,7 @@ fn swap_out_success_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr1 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -691,6 +747,7 @@ fn swap_out_success_impl() { for i in 0..3 { for j in 
0..pagesize() { let ptr = (base_addr2 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -732,13 +789,19 @@ fn swap_out_handled_page_impl() { let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); // write data before registering to userfaultfd + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() { *(i as *mut u8) = 1; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); } @@ -754,6 +817,7 @@ fn swap_out_handled_page_impl() { let mut result = Vec::new(); for i in 0..pagesize() { let ptr = (base_addr1 + pagesize() + i) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -798,6 +862,8 @@ fn swap_out_twice_impl() { ]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in 0..pagesize() { *((base_addr1 + i) as *mut u8) = 1; @@ -806,8 +872,12 @@ fn swap_out_twice_impl() { *((base_addr2 + 2 * pagesize() + i) as *mut u8) = 4; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -826,18 +896,22 @@ fn swap_out_twice_impl() { let join_handle = thread::spawn(move || { for i in 0..pagesize() { let ptr = (base_addr1 + pagesize() + i) as *mut u8; + // SAFETY: 
trivially safe unsafe { *ptr = 5; } } for i in 0..pagesize() { let ptr = (base_addr1 + 2 * pagesize() + i) as *mut u8; + // SAFETY: trivially safe unsafe { *ptr = 6; } } }); wait_thread_with_timeout(join_handle, 100); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -862,6 +936,7 @@ fn swap_out_twice_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr1 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -870,6 +945,7 @@ fn swap_out_twice_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr2 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -917,6 +993,8 @@ fn swap_in_success_impl() { ]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr1 + pagesize()..base_addr1 + 2 * pagesize() { *(i as *mut u8) = 1; @@ -928,8 +1006,12 @@ fn swap_in_success_impl() { *(i as *mut u8) = 3; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -944,12 +1026,16 @@ fn swap_in_success_impl() { page_handler .handle_page_fault(&uffd, base_addr2 + pagesize()) .unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() { *(i as *mut u8) = 4; } } // move to staging memory. 
+ // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler .move_to_staging(base_addr2, &shm, 3 * pagesize() as u64) @@ -966,6 +1052,7 @@ fn swap_in_success_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr1 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -974,6 +1061,7 @@ fn swap_in_success_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr2 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -1021,6 +1109,8 @@ fn trim_success_impl() { ]; let page_handler = PageHandler::create(&file, &staging_shmem, ®ions, worker.channel.clone()).unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr1..base_addr1 + pagesize() { *(i as *mut u8) = 0; @@ -1038,8 +1128,12 @@ fn trim_success_impl() { *(i as *mut u8) = 3; } } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { register_regions(®ions, array::from_ref(&uffd)) }.unwrap(); + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -1067,6 +1161,8 @@ fn trim_success_impl() { .handle_page_fault(&uffd, base_addr2 + i * pagesize()) .unwrap(); } + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { for i in base_addr2 + pagesize()..base_addr2 + 2 * pagesize() { *(i as *mut u8) = 4; @@ -1074,6 +1170,8 @@ fn trim_success_impl() { } // move to staging memory. 
+ // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] unsafe { page_handler.move_to_staging(base_addr1, &shm, 0).unwrap(); page_handler @@ -1103,6 +1201,7 @@ fn trim_success_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr1 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } @@ -1111,6 +1210,7 @@ fn trim_success_impl() { for i in 0..3 { for j in 0..pagesize() { let ptr = (base_addr2 + i * pagesize() + j) as *mut u8; + // SAFETY: trivially safe unsafe { result.push(*ptr); } diff --git a/tests/plugins.rs b/tests/plugins.rs index a54ceacd4f..836edd2f70 100644 --- a/tests/plugins.rs +++ b/tests/plugins.rs @@ -141,6 +141,8 @@ fn test_plugin(src: &str) { } fn keep_fd_on_exec(f: &F) { + // SAFETY: safe because function doesn't modify memory and we don't care about the return + // value. unsafe { ioctl(f, 0x5450 /* FIONCLEX */); } diff --git a/third_party/libslirp-rs/src/context.rs b/third_party/libslirp-rs/src/context.rs index d17dd2df8d..97b62a08c2 100644 --- a/third_party/libslirp-rs/src/context.rs +++ b/third_party/libslirp-rs/src/context.rs @@ -87,8 +87,9 @@ pub struct Context { impl Drop for Context { fn drop(&mut self) { - // Safe because self.context is guaranteed to be valid or null upon construction. if !self.slirp.is_null() { + // SAFETY: + // Safe because self.context is guaranteed to be valid or null upon construction. unsafe { slirp_cleanup(self.slirp); } @@ -156,9 +157,11 @@ pub trait CallbackHandler { } extern "C" fn write_handler_callback(buf: *const c_void, len: usize, opaque: *mut c_void) -> isize { + // SAFETY: // Safe because we pass in opaque as exactly this type. let closure = unsafe { &mut *(opaque as *mut &mut dyn FnMut(&[u8]) -> isize) }; + // SAFETY: // Safe because libslirp provides us with a valid buffer & that buffer's length. 
let slice = unsafe { slice::from_raw_parts(buf as *const u8, len) }; @@ -166,9 +169,11 @@ extern "C" fn write_handler_callback(buf: *const c_void, len: usize, opaque: *mu } extern "C" fn read_handler_callback(buf: *mut c_void, len: usize, opaque: *mut c_void) -> isize { + // SAFETY: // Safe because we pass in opaque as exactly this type. let closure = unsafe { &mut *(opaque as *mut &mut dyn FnMut(&mut [u8]) -> isize) }; + // SAFETY: // Safe because libslirp provides us with a valid buffer & that buffer's length. let slice = unsafe { slice::from_raw_parts_mut(buf as *mut u8, len) }; @@ -275,6 +280,7 @@ impl fmt::Debug for PollEvents { } extern "C" fn add_poll_handler_callback(fd: c_int, events: c_int, opaque: *mut c_void) -> c_int { + // SAFETY: // Safe because we pass in opaque as exactly this type. let closure = unsafe { &mut *(opaque as *mut &mut dyn FnMut(i32, PollEvents) -> i32) }; @@ -282,6 +288,7 @@ extern "C" fn add_poll_handler_callback(fd: c_int, events: c_int, opaque: *mut c } extern "C" fn get_revents_handler_callback(idx: c_int, opaque: *mut c_void) -> c_int { + // SAFETY: // Safe because we pass in opaque as exactly this type. let closure = unsafe { &mut *(opaque as *mut &mut dyn FnMut(i32) -> PollEvents) }; @@ -295,9 +302,11 @@ extern "C" fn send_packet_handler( len: usize, opaque: *mut c_void, ) -> isize { + // SAFETY: // Safe because libslirp gives us a valid buffer & that buffer's length. let slice = unsafe { slice::from_raw_parts(buf as *const u8, len) }; + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. let res = unsafe { (*(opaque as *mut Context)) @@ -315,9 +324,11 @@ extern "C" fn send_packet_handler( } extern "C" fn guest_error_handler(msg: *const c_char, opaque: *mut c_void) { + // SAFETY: // Safe because libslirp gives us a valid C string representing the error message. 
let msg = str::from_utf8(unsafe { CStr::from_ptr(msg) }.to_bytes()).unwrap_or(""); + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. unsafe { (*(opaque as *mut Context)) @@ -327,6 +338,7 @@ extern "C" fn guest_error_handler(msg: *const c_char, opaque } extern "C" fn clock_get_ns_handler(opaque: *mut c_void) -> i64 { + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. unsafe { (*(opaque as *mut Context)) @@ -342,6 +354,7 @@ extern "C" fn timer_new_handler( ) -> *mut c_void { let callback = Box::new(move || { if let Some(cb) = cb { + // SAFETY: // Safe because libslirp gives us a valid callback function to call. unsafe { cb(cb_opaque); @@ -349,6 +362,7 @@ extern "C" fn timer_new_handler( } }); + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. let timer = unsafe { (*(opaque as *mut Context)) @@ -360,6 +374,7 @@ extern "C" fn timer_new_handler( } extern "C" fn timer_free_handler(timer: *mut c_void, opaque: *mut c_void) { + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. // Also, timer was created by us as exactly the type we unpack into. unsafe { @@ -375,6 +390,7 @@ extern "C" fn timer_mod_handler( expire_time: i64, opaque: *mut c_void, ) { + // SAFETY: // Safe because: // 1. We pass in opaque as exactly this type when constructing the Slirp object. // 2. timer was created by us as exactly the type we unpack into @@ -389,6 +405,7 @@ extern "C" fn timer_mod_handler( } extern "C" fn register_poll_fd_handler(fd: c_int, opaque: *mut c_void) { + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. 
unsafe { (*(opaque as *mut Context)) @@ -398,6 +415,7 @@ extern "C" fn register_poll_fd_handler(fd: c_int, opaque: *m } extern "C" fn unregister_poll_fd_handler(fd: c_int, opaque: *mut c_void) { + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. unsafe { (*(opaque as *mut Context)) @@ -407,6 +425,7 @@ extern "C" fn unregister_poll_fd_handler(fd: c_int, opaque: } extern "C" fn notify_handler(opaque: *mut c_void) { + // SAFETY: // Safe because we pass in opaque as exactly this type when constructing the Slirp object. unsafe { (*(opaque as *mut Context)).callback_handler.notify() } } @@ -497,6 +516,7 @@ impl Context { disable_dns: false, }; + // SAFETY: // Safe because we pass valid pointers (or null as appropriate) as parameters and we check // that the return value is valid. let slirp = unsafe { @@ -529,15 +549,18 @@ impl Context { pub fn handle_guest_input(&mut self) -> Result<()> { loop { match self.callback_handler.end_read_from_guest() { - Ok(ethernet_frame) => unsafe { + Ok(ethernet_frame) => { + // SAFETY: // Safe because the buffer (ethernet_frame) is valid & libslirp is provided // with the data's underlying length. - slirp_input( - self.slirp, - ethernet_frame.as_ptr(), - ethernet_frame.len() as i32, - ); - }, + unsafe { + slirp_input( + self.slirp, + ethernet_frame.as_ptr(), + ethernet_frame.len() as i32, + ); + } + } Err(e) if e.kind() == std::io::ErrorKind::InvalidData => { error!("error reading packet from guest: {}", e); } @@ -575,8 +598,12 @@ impl Context { } pub fn connection_info(&mut self) -> &str { - str::from_utf8(unsafe { CStr::from_ptr(slirp_connection_info(self.slirp)) }.to_bytes()) - .unwrap_or("") + str::from_utf8( + // TODO(b/315998194): Add safety comment + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { CStr::from_ptr(slirp_connection_info(self.slirp)) }.to_bytes(), + ) + .unwrap_or("") } /// Requests libslirp provide the set of sockets & events that should be polled for. 
These @@ -592,6 +619,7 @@ impl Context { F: FnMut(i32, PollEvents) -> i32, { let cb = &mut (&mut add_poll_cb as &mut dyn FnMut(i32, PollEvents) -> i32); + // SAFETY: // Safe because cb is only used while slirp_pollfds_fill is running, and self.slirp is // guaranteed to be valid. unsafe { @@ -615,6 +643,7 @@ impl Context { { let cb = &mut (&mut get_revents_cb as &mut dyn FnMut(i32) -> PollEvents); + // SAFETY: // Safe because cb is only used while slirp_pollfds_poll is running, and self.slirp is // guaranteed to be valid. unsafe { @@ -631,9 +660,10 @@ impl Context { where F: FnMut(&[u8]) -> isize, { + let cb = &mut (&mut write_cb as &mut dyn FnMut(&[u8]) -> isize); + // SAFETY: // Safe because cb is only used while state_save is running, and self.slirp is // guaranteed to be valid. - let cb = &mut (&mut write_cb as &mut dyn FnMut(&[u8]) -> isize); unsafe { slirp_state_save( self.slirp, @@ -647,13 +677,14 @@ impl Context { where F: FnMut(&mut [u8]) -> isize, { + let cb = &mut (&mut read_cb as &mut dyn FnMut(&mut [u8]) -> isize); + // SAFETY: // Safe because cb is only used while state_load is running, and self.slirp is // guaranteed to be valid. While this function may fail, interpretation of the error code // is the responsibility of the caller. // // TODO(nkgold): if state_load becomes used by crosvm, interpretation of the error code // should occur here. - let cb = &mut (&mut read_cb as &mut dyn FnMut(&mut [u8]) -> isize); unsafe { slirp_state_load( self.slirp, diff --git a/third_party/vmm_vhost/src/connection/tube.rs b/third_party/vmm_vhost/src/connection/tube.rs index 1c534ff384..a0c0feedaf 100644 --- a/third_party/vmm_vhost/src/connection/tube.rs +++ b/third_party/vmm_vhost/src/connection/tube.rs @@ -118,11 +118,13 @@ impl TubePlatformConnection { let files = match msg.rds.len() { 0 => None, - // Safe because we own r.rd and it is guaranteed valid. 
_ => Some( msg.rds .iter() - .map(|r| unsafe { File::from_raw_descriptor(r.rd) }) + .map(|r| + // SAFETY: + // Safe because we own r.rd and it is guaranteed valid. + unsafe { File::from_raw_descriptor(r.rd) }) .collect::>(), ), }; @@ -136,6 +138,7 @@ impl TubePlatformConnection { let copy_count = min(dest_iov.len(), msg.data.len() - bytes_read); + // SAFETY: // Safe because: // 1) msg.data and dest_iov do not overlap. // 2) copy_count is bounded by dest_iov's length and msg.data.len() so we can't diff --git a/third_party/vmm_vhost/src/lib.rs b/third_party/vmm_vhost/src/lib.rs index a8590b457a..e7bdc9004a 100644 --- a/third_party/vmm_vhost/src/lib.rs +++ b/third_party/vmm_vhost/src/lib.rs @@ -438,8 +438,9 @@ mod tests { #[cfg(windows)] let tubes = base::Tube::pair().unwrap(); #[cfg(windows)] - // Safe because we will be importing the Tube in the other thread. let descriptor = + // SAFETY: + // Safe because we will be importing the Tube in the other thread. unsafe { tube_transporter::packed_tube::pack(tubes.0, std::process::id()).unwrap() }; #[cfg(unix)] diff --git a/third_party/vmm_vhost/src/master.rs b/third_party/vmm_vhost/src/master.rs index 19da3d2c61..ae8fb200c2 100644 --- a/third_party/vmm_vhost/src/master.rs +++ b/third_party/vmm_vhost/src/master.rs @@ -138,6 +138,7 @@ impl Master { } let body = VhostUserMemory::new(ctx.regions.len() as u32); + // SAFETY: trivially safe let (_, payload, _) = unsafe { ctx.regions.align_to::() }; let hdr = self.send_request_with_payload( MasterReq::SET_MEM_TABLE, diff --git a/third_party/vmm_vhost/src/master_req_handler.rs b/third_party/vmm_vhost/src/master_req_handler.rs index 4285feb5fe..963d9a68f1 100644 --- a/third_party/vmm_vhost/src/master_req_handler.rs +++ b/third_party/vmm_vhost/src/master_req_handler.rs @@ -289,6 +289,7 @@ impl MasterReqHandler { buf: &[u8], ) -> Result { self.check_msg_size(hdr, size, mem::size_of::())?; + // SAFETY: above check ensures that buf is `T` sized. 
let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) }; if !msg.is_valid() { return Err(Error::InvalidMessage); diff --git a/third_party/vmm_vhost/src/master_req_handler/unix.rs b/third_party/vmm_vhost/src/master_req_handler/unix.rs index 37db4208ac..4cc2a09ac0 100644 --- a/third_party/vmm_vhost/src/master_req_handler/unix.rs +++ b/third_party/vmm_vhost/src/master_req_handler/unix.rs @@ -26,9 +26,11 @@ impl MasterReqHandler { pub fn with_stream(backend: S) -> Result { Self::new( backend, - Box::new(|stream| unsafe { + Box::new(|stream| + // SAFETY: // Safe because we own the raw fd. - SafeDescriptor::from_raw_descriptor(stream.into_raw_fd()) + unsafe { + SafeDescriptor::from_raw_descriptor(stream.into_raw_fd()) }), ) } @@ -82,10 +84,12 @@ mod tests { let mut handler = MasterReqHandler::with_stream(backend).unwrap(); let tx_descriptor = handler.take_tx_descriptor(); + // SAFETY: return value of dup is checked. let fd = unsafe { libc::dup(tx_descriptor.as_raw_descriptor()) }; if fd < 0 { panic!("failed to duplicated tx fd!"); } + // SAFETY: fd is created above and is valid let stream = unsafe { SystemStream::from_raw_descriptor(fd) }; let mut fs_cache = Slave::from_stream(stream); @@ -112,10 +116,13 @@ mod tests { handler.set_reply_ack_flag(true); let tx_descriptor = handler.take_tx_descriptor(); + // SAFETY: return value of dup is checked. 
let fd = unsafe { libc::dup(tx_descriptor.as_raw_descriptor()) }; if fd < 0 { panic!("failed to duplicated tx fd!"); } + + // SAFETY: fd is created above and is valid let stream = unsafe { SystemStream::from_raw_descriptor(fd) }; let mut fs_cache = Slave::from_stream(stream); diff --git a/third_party/vmm_vhost/src/master_req_handler/windows.rs b/third_party/vmm_vhost/src/master_req_handler/windows.rs index 86613d2e43..e01a174d7c 100644 --- a/third_party/vmm_vhost/src/master_req_handler/windows.rs +++ b/third_party/vmm_vhost/src/master_req_handler/windows.rs @@ -18,8 +18,10 @@ impl MasterReqHandler { pub fn with_tube(backend: S, backend_pid: u32) -> Result { Self::new( backend, - Box::new(move |tube| unsafe { + Box::new(move |tube| + // SAFETY: // Safe because we expect the tube to be unpacked in the other process. + unsafe { packed_tube::pack(tube, backend_pid).expect("packed tube") }), ) @@ -86,6 +88,7 @@ mod tests { let event = base::Event::new().unwrap(); let tx_descriptor = handler.take_tx_descriptor(); + // SAFETY: // Safe because we only do it once. let stream = unsafe { packed_tube::unpack(tx_descriptor).unwrap() }; let mut fs_cache = Slave::from_stream(stream); @@ -117,6 +120,7 @@ mod tests { let event = base::Event::new().unwrap(); let tx_descriptor = handler.take_tx_descriptor(); + // SAFETY: // Safe because we only do it once. 
let stream = unsafe { packed_tube::unpack(tx_descriptor).unwrap() }; let mut fs_cache = Slave::from_stream(stream); diff --git a/tools/impl/bindgen-common.sh b/tools/impl/bindgen-common.sh index 9dfe6d57f5..fbc2e36b25 100755 --- a/tools/impl/bindgen-common.sh +++ b/tools/impl/bindgen-common.sh @@ -18,6 +18,7 @@ export BINDGEN_OPTS=( export BINDGEN_HEADER="/* automatically generated by tools/bindgen-all-the-things */ #![allow(clippy::missing_safety_doc)] +#![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::upper_case_acronyms)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] diff --git a/usb_sys/src/lib.rs b/usb_sys/src/lib.rs index 27c6d5f502..897df07a94 100644 --- a/usb_sys/src/lib.rs +++ b/usb_sys/src/lib.rs @@ -160,12 +160,16 @@ pub struct usbdevfs_urb { impl Default for usbdevfs_urb { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } +// SAFETY: // The structure that embeds this should ensure that this is safe. unsafe impl Send for usbdevfs_urb {} +// SAFETY: +// The structure that embeds this should ensure that this is safe. 
unsafe impl Sync for usbdevfs_urb {} #[repr(C)] @@ -211,6 +215,7 @@ pub struct usbdevfs_streams { impl Default for usbdevfs_streams { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } diff --git a/usb_util/src/device.rs b/usb_util/src/device.rs index 504b697ac2..bc338d88af 100644 --- a/usb_util/src/device.rs +++ b/usb_util/src/device.rs @@ -74,11 +74,13 @@ impl DmaBuffer { } pub fn as_slice(&self) -> &[u8] { + // SAFETY: // Safe because the region has been lent by a device unsafe { std::slice::from_raw_parts(self.addr as *const u8, self.size) } } pub fn as_mut_slice(&mut self) -> &mut [u8] { + // SAFETY: // Safe because the region has been lent by a device unsafe { std::slice::from_raw_parts_mut(self.addr as *mut u8, self.size) } } @@ -270,6 +272,7 @@ impl Device { let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb; + // SAFETY: // Safe because we control the lifetime of the URB via Arc::into_raw() and // Arc::from_raw() in poll_transfers(). unsafe { @@ -290,8 +293,9 @@ impl Device { // Reap completed transfers until we get EAGAIN. loop { let mut urb_ptr: *mut usb_sys::usbdevfs_urb = std::ptr::null_mut(); - // Safe because we provide a valid urb_ptr to be filled by the kernel. let result = + // SAFETY: + // Safe because we provide a valid urb_ptr to be filled by the kernel. unsafe { self.ioctl_with_mut_ref(usb_sys::USBDEVFS_REAPURBNDELAY(), &mut urb_ptr) }; match result { // EAGAIN indicates no more completed transfers right now. @@ -304,9 +308,10 @@ impl Device { break; } + let rc_transfer: Arc = + // SAFETY: // Safe because the URB usercontext field is always set to the result of // Arc::into_raw() in submit_transfer(). - let rc_transfer: Arc = unsafe { Arc::from_raw((*urb_ptr).usercontext as *const Transfer) }; // There should always be exactly one strong reference to rc_transfer, @@ -343,6 +348,7 @@ impl Device { _ => return Ok(()), } + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor. 
let result = unsafe { self.ioctl(usb_sys::USBDEVFS_RESET()) }; @@ -365,6 +371,7 @@ impl Device { flags: 0, driver: [0u8; 256], }; + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevs_disconnect_claim structure. unsafe { @@ -377,6 +384,7 @@ impl Device { /// Release an interface previously claimed with `claim_interface()`. pub fn release_interface(&self, interface_number: u8) -> Result<()> { let ifnum: c_uint = interface_number.into(); + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. unsafe { @@ -396,6 +404,7 @@ impl Device { interface: interface_number.into(), altsetting: alternative_setting.into(), }; + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_setinterface structure. unsafe { @@ -407,6 +416,7 @@ impl Device { /// Set active configuration for this device. pub fn set_active_configuration(&mut self, config: u8) -> Result<()> { let config: c_int = config.into(); + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to int. unsafe { @@ -473,6 +483,7 @@ impl Device { timeout: 5000, // milliseconds data: &mut active_config as *mut u8 as *mut c_void, }; + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_ctrltransfer structure. unsafe { @@ -489,6 +500,7 @@ impl Device { /// Clear the halt/stall condition for an endpoint. pub fn clear_halt(&self, ep_addr: u8) -> Result<()> { let endpoint: c_uint = ep_addr.into(); + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. unsafe { @@ -500,6 +512,7 @@ impl Device { /// Get speed of this device. 
pub fn get_speed(&self) -> Result> { + // SAFETY: args are valid and the return value is checked let speed = unsafe { self.ioctl(usb_sys::USBDEVFS_GET_SPEED()) }?; match speed { 1 => Ok(Some(DeviceSpeed::Low)), // Low Speed @@ -520,9 +533,11 @@ impl Device { let mut streams = vec_with_array_field::(1); streams[0].num_streams = num_streams as c_uint; streams[0].num_eps = 1 as c_uint; + // SAFETY: // Safe because we have allocated enough memory let eps = unsafe { streams[0].eps.as_mut_slice(1) }; eps[0] = ep as c_uchar; + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_streams structure. unsafe { @@ -535,9 +550,11 @@ impl Device { pub fn free_streams(&self, ep: u8) -> Result<()> { let mut streams = vec_with_array_field::(1); streams[0].num_eps = 1 as c_uint; + // SAFETY: // Safe because we have allocated enough memory let eps = unsafe { streams[0].eps.as_mut_slice(1) }; eps[0] = ep as c_uchar; + // SAFETY: // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_streams structure. unsafe { @@ -588,6 +605,7 @@ impl Transfer { .try_into() .map_err(Error::InvalidBufferLength)?; + // SAFETY: // Safe because we ensured there is enough space in transfer.urb to hold the number of // isochronous frames required. let iso_frame_desc = unsafe { @@ -675,6 +693,7 @@ impl TransferHandle { Some(fd) => fd, }; + // SAFETY: // Safe because fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_urb structure. if unsafe { diff --git a/vfio_sys/src/plat.rs b/vfio_sys/src/plat.rs index 6495194529..35a2b71749 100644 --- a/vfio_sys/src/plat.rs +++ b/vfio_sys/src/plat.rs @@ -162,6 +162,7 @@ pub union acpi_evt_forward_set__bindgen_ty_1 { impl Default for acpi_evt_forward_set__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. 
unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/vfio_sys/src/vfio.rs b/vfio_sys/src/vfio.rs index 5b255e4239..f8a1208bcb 100644 --- a/vfio_sys/src/vfio.rs +++ b/vfio_sys/src/vfio.rs @@ -66,10 +66,18 @@ impl __IncompleteArrayField { pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) @@ -383,6 +391,7 @@ pub union vfio_device_gfx_plane_info__bindgen_ty_1 { impl Default for vfio_device_gfx_plane_info__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -392,6 +401,7 @@ impl Default for vfio_device_gfx_plane_info__bindgen_ty_1 { impl Default for vfio_device_gfx_plane_info { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -515,6 +525,7 @@ pub struct vfio_bitmap { impl Default for vfio_bitmap { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. 
unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -547,6 +558,7 @@ pub struct vfio_iommu_type1_dirty_bitmap_get { impl Default for vfio_iommu_type1_dirty_bitmap_get { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -593,6 +605,7 @@ pub union vfio_eeh_pe_op__bindgen_ty_1 { impl Default for vfio_eeh_pe_op__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -602,6 +615,7 @@ impl Default for vfio_eeh_pe_op__bindgen_ty_1 { impl Default for vfio_eeh_pe_op { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/vhost/src/lib.rs b/vhost/src/lib.rs index b5c89933e5..99f32e5db1 100644 --- a/vhost/src/lib.rs +++ b/vhost/src/lib.rs @@ -79,6 +79,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { /// Set the current process as the owner of this file descriptor. /// This must be run before any other vhost ioctls. fn set_owner(&self) -> Result<()> { + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { ioctl(self, virtio_sys::VHOST_SET_OWNER()) }; @@ -91,6 +92,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { /// Give up ownership and reset the device to default values. Allows a subsequent call to /// `set_owner` to succeed. fn reset_owner(&self) -> Result<()> { + // SAFETY: // This ioctl is called on a valid vhost fd and has its // return value checked. 
let ret = unsafe { ioctl(self, virtio_sys::VHOST_RESET_OWNER()) }; @@ -103,6 +105,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { /// Get a bitmask of supported virtio/vhost features. fn get_features(&self) -> Result { let mut avail_features: u64 = 0; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { @@ -120,6 +123,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { /// # Arguments /// * `features` - Bitmask of features to set. fn set_features(&self, features: u64) -> Result<()> { + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_FEATURES(), &features) }; @@ -143,11 +147,13 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { let layout = Layout::from_size_align(size, ALIGN_OF_MEMORY).expect("impossible layout"); let mut allocation = LayoutAllocation::zeroed(layout); + // SAFETY: // Safe to obtain an exclusive reference because there are no other // references to the allocation yet and all-zero is a valid bit pattern. let vhost_memory = unsafe { allocation.as_mut::() }; vhost_memory.nregions = num_regions as u32; + // SAFETY: // regions is a zero-length array, so taking a mut slice requires that // we correctly specify the size to match the amount of backing memory. let vhost_regions = unsafe { vhost_memory.regions.as_mut_slice(num_regions) }; @@ -161,6 +167,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { }; } + // SAFETY: // This ioctl is called with a pointer that is valid for the lifetime // of this function. The kernel will make its own copy of the memory // tables. As always, check the return value. @@ -185,6 +192,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { num: num as u32, }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. 
let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_NUM(), &vring_state) }; @@ -289,6 +297,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { log_guest_addr: log_addr as u64, }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_ADDR(), &vring_addr) }; @@ -309,6 +318,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { num: num as u32, }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_BASE(), &vring_state) }; @@ -328,6 +338,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { num: 0, }; + // SAFETY: // Safe because this will only modify `vring_state` and we check the return value. let ret = unsafe { ioctl_with_mut_ref(self, virtio_sys::VHOST_GET_VRING_BASE(), &mut vring_state) @@ -350,6 +361,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { fd: event.as_raw_descriptor(), }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_CALL(), &vring_file) }; @@ -370,6 +382,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { fd: event.as_raw_descriptor(), }; + // SAFETY: // This ioctl is called on a valid vhost_net fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_ERR(), &vring_file) }; @@ -391,6 +404,7 @@ pub trait Vhost: AsRawDescriptor + std::marker::Sized { fd: event.as_raw_descriptor(), }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. 
let ret = unsafe { ioctl_with_ref(self, virtio_sys::VHOST_SET_VRING_KICK(), &vring_file) }; diff --git a/vhost/src/net.rs b/vhost/src/net.rs index 5557816b16..d3236a2ccd 100644 --- a/vhost/src/net.rs +++ b/vhost/src/net.rs @@ -68,6 +68,7 @@ where fd: event.map_or(-1, |event| event.as_raw_descriptor()), }; + // SAFETY: // This ioctl is called on a valid vhost_net descriptor and has its // return value checked. let ret = unsafe { diff --git a/vhost/src/vsock.rs b/vhost/src/vsock.rs index 177532e117..d61064ac84 100644 --- a/vhost/src/vsock.rs +++ b/vhost/src/vsock.rs @@ -34,6 +34,7 @@ impl Vsock { /// # Arguments /// * `cid` - CID to assign to the guest pub fn set_cid(&self, cid: u64) -> Result<()> { + // SAFETY: Safe because descriptor is valid and the return value is checked. let ret = unsafe { ioctl_with_ref(&self.descriptor, VHOST_VSOCK_SET_GUEST_CID(), &cid) }; if ret < 0 { return ioctl_result(); @@ -53,6 +54,7 @@ impl Vsock { fn set_running(&self, running: bool) -> Result<()> { let on = ::std::os::raw::c_int::from(running); + // SAFETY: Safe because descriptor is valid and the return value is checked. let ret = unsafe { ioctl_with_ref(&self.descriptor, VHOST_VSOCK_SET_RUNNING(), &on) }; if ret < 0 { diff --git a/virtio_sys/src/vhost.rs b/virtio_sys/src/vhost.rs index 7797387a79..6c777fe8db 100644 --- a/virtio_sys/src/vhost.rs +++ b/virtio_sys/src/vhost.rs @@ -23,10 +23,18 @@ impl __IncompleteArrayField { pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. 
#[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) @@ -107,6 +115,7 @@ pub union vhost_msg__bindgen_ty_1 { impl Default for vhost_msg__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -116,6 +125,7 @@ impl Default for vhost_msg__bindgen_ty_1 { impl Default for vhost_msg { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -138,6 +148,7 @@ pub union vhost_msg_v2__bindgen_ty_1 { impl Default for vhost_msg_v2__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -147,6 +158,7 @@ impl Default for vhost_msg_v2__bindgen_ty_1 { impl Default for vhost_msg_v2 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -179,6 +191,7 @@ pub struct vhost_scsi_target { impl Default for vhost_scsi_target { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. 
unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/virtio_sys/src/virtio_fs.rs b/virtio_sys/src/virtio_fs.rs index 31f2271815..a257d96211 100644 --- a/virtio_sys/src/virtio_fs.rs +++ b/virtio_sys/src/virtio_fs.rs @@ -23,6 +23,7 @@ pub struct virtio_fs_config { impl Default for virtio_fs_config { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/virtio_sys/src/virtio_net.rs b/virtio_sys/src/virtio_net.rs index a3783a4ddb..25caf6e005 100644 --- a/virtio_sys/src/virtio_net.rs +++ b/virtio_sys/src/virtio_net.rs @@ -28,10 +28,18 @@ impl __IncompleteArrayField { pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } + /// # Safety + /// + /// It is caller's responsibility to ensure that `self` exists as long as returned slice is + /// in use. #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) @@ -183,6 +191,7 @@ pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3 { impl Default for virtio_net_hdr_v1__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -192,6 +201,7 @@ impl Default for virtio_net_hdr_v1__bindgen_ty_1 { impl Default for virtio_net_hdr_v1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. 
unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() @@ -209,6 +219,7 @@ pub struct virtio_net_hdr_v1_hash { impl Default for virtio_net_hdr_v1_hash { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/virtio_sys/src/virtio_ring.rs b/virtio_sys/src/virtio_ring.rs index 23f9d7e2bf..d09495d972 100644 --- a/virtio_sys/src/virtio_ring.rs +++ b/virtio_sys/src/virtio_ring.rs @@ -23,10 +23,14 @@ impl __IncompleteArrayField { pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } + /// # Safety + /// It is caller's responsibility to ensure that slice doesn't live after `self` #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } + /// # Safety + /// It is caller's responsibility to ensure that slice doesn't live after `self` #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) @@ -102,6 +106,7 @@ pub struct vring { impl Default for vring { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/virtio_sys/src/virtio_scsi.rs b/virtio_sys/src/virtio_scsi.rs index df74e5d9fb..f23ccd33d9 100644 --- a/virtio_sys/src/virtio_scsi.rs +++ b/virtio_sys/src/virtio_scsi.rs @@ -82,6 +82,7 @@ pub struct virtio_scsi_cmd_resp { impl Default for virtio_scsi_cmd_resp { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::::uninit(); + // SAFETY: Safe because s is aligned and is initialized in the block. 
unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs index 25af29abf5..a914bd103c 100644 --- a/vm_control/src/lib.rs +++ b/vm_control/src/lib.rs @@ -389,6 +389,7 @@ pub enum VmMemorySource { // The following are wrappers to avoid base dependencies in the rutabaga crate fn to_rutabaga_desciptor(s: SafeDescriptor) -> RutabagaDescriptor { + // SAFETY: // Safe because we own the SafeDescriptor at this point. unsafe { RutabagaDescriptor::from_raw_descriptor(s.into_raw_descriptor()) } } @@ -403,6 +404,10 @@ impl RutabagaMemoryRegion { } } +// SAFETY: +// +// Self guarantees `ptr`..`ptr+size` is an mmaped region owned by this object that +// can't be unmapped during the `MappedRegion`'s lifetime. unsafe impl MappedRegion for RutabagaMemoryRegion { fn as_ptr(&self) -> *mut u8 { self.region.as_ptr() diff --git a/vm_control/src/sys/windows/gpu.rs b/vm_control/src/sys/windows/gpu.rs index 73aacb875e..22b4cac0b1 100644 --- a/vm_control/src/sys/windows/gpu.rs +++ b/vm_control/src/sys/windows/gpu.rs @@ -57,6 +57,7 @@ pub struct DisplayDataProvider; impl ProvideDisplayData for DisplayDataProvider { fn get_host_display_size() -> (u32, u32) { + // SAFETY: // Safe because we're passing valid values and screen size won't exceed u32 range. let (width, height) = unsafe { ( diff --git a/vm_memory/src/guest_memory.rs b/vm_memory/src/guest_memory.rs index 5497875945..f36ab91b8f 100644 --- a/vm_memory/src/guest_memory.rs +++ b/vm_memory/src/guest_memory.rs @@ -720,9 +720,12 @@ impl GuestMemory { /// ``` pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> { let (mapping, offset, _) = self.find_region(guest_addr)?; - // This is safe; `find_region` already checks that offset is in - // bounds. - Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8) + Ok( + // SAFETY: + // This is safe; `find_region` already checks that offset is in + // bounds. 
+ unsafe { mapping.as_ptr().add(offset) } as *const u8, + ) } /// Convert a GuestAddress into a pointer in the address space of this @@ -766,9 +769,12 @@ impl GuestMemory { return Err(Error::InvalidGuestAddress(guest_addr)); } - // This is safe; `find_region` already checks that offset is in - // bounds. - Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8) + Ok( + // SAFETY: + // This is safe; `find_region` already checks that offset is in + // bounds. + unsafe { mapping.as_ptr().add(offset) } as *const u8, + ) } /// Returns a reference to the region that backs the given address. @@ -916,6 +922,7 @@ struct MemorySnapshotMetadata { regions: Vec<(u64, usize)>, } +// SAFETY: // It is safe to implement BackingMemory because GuestMemory can be mutated any time already. unsafe impl BackingMemory for GuestMemory { fn get_volatile_slice( diff --git a/vm_memory/src/udmabuf/sys/linux.rs b/vm_memory/src/udmabuf/sys/linux.rs index ffa92acfb5..2268cc6094 100644 --- a/vm_memory/src/udmabuf/sys/linux.rs +++ b/vm_memory/src/udmabuf/sys/linux.rs @@ -113,6 +113,7 @@ impl UdmabufDriverTrait for UnixUdmabufDriver { items[i].size = len as u64; } + // SAFETY: // Safe because we always allocate enough space for `udmabuf_create_list`. let fd = unsafe { let create_list = list.as_mut_ptr(); @@ -124,6 +125,7 @@ impl UdmabufDriverTrait for UnixUdmabufDriver { return Err(UdmabufError::DmabufCreationFail(IoError::last_os_error())); } + // SAFETY: // Safe because we validated the file exists.
Ok(unsafe { SafeDescriptor::from_raw_descriptor(fd) }) } diff --git a/x86_64/src/acpi.rs b/x86_64/src/acpi.rs index 0e9f939804..338b1ddc63 100644 --- a/x86_64/src/acpi.rs +++ b/x86_64/src/acpi.rs @@ -446,11 +446,13 @@ fn sync_acpi_id_from_cpuid( return Err(e); } + // SAFETY: // Safe because we pass 0 and 0 for this call and the host supports the // `cpuid` instruction let mut cpuid_entry: CpuidResult = unsafe { __cpuid_count(0, 0) }; if cpuid_entry.eax >= 0xB { + // SAFETY: // Safe because we pass 0xB and 0 for this call and the host supports the // `cpuid` instruction cpuid_entry = unsafe { __cpuid_count(0xB, 0) }; @@ -488,6 +490,7 @@ fn sync_acpi_id_from_cpuid( if !has_leafb { if !get_apic_id { + // SAFETY: // Safe because we pass 1 for this call and the host supports the // `cpuid` instruction cpuid_entry = unsafe { __cpuid(1) }; diff --git a/x86_64/src/bootparam.rs b/x86_64/src/bootparam.rs index c84c6ada24..0b24a4e1dd 100644 --- a/x86_64/src/bootparam.rs +++ b/x86_64/src/bootparam.rs @@ -200,6 +200,7 @@ pub struct edd_device_params__bindgen_ty_1__bindgen_ty_6 { } impl Default for edd_device_params__bindgen_ty_1 { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } @@ -293,11 +294,13 @@ pub struct edd_device_params__bindgen_ty_2__bindgen_ty_10 { } impl Default for edd_device_params__bindgen_ty_2 { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } impl Default for edd_device_params { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } @@ -314,6 +317,7 @@ pub struct edd_info { } impl Default for edd_info { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } @@ -332,6 +336,7 @@ pub struct edid_info { } impl Default for edid_info { fn default() -> Self { + // SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } @@ -458,6 +463,7 @@ pub struct boot_params { } impl Default for boot_params { fn default() -> Self { + // 
SAFETY: trivially safe unsafe { ::std::mem::zeroed() } } } diff --git a/x86_64/src/cpuid.rs b/x86_64/src/cpuid.rs index 9ee6ece917..8d9ef26a19 100644 --- a/x86_64/src/cpuid.rs +++ b/x86_64/src/cpuid.rs @@ -145,6 +145,7 @@ pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) { entry.cpuid.ebx |= EBX_CLFLUSH_CACHELINE << EBX_CLFLUSH_SIZE_SHIFT; // Expose HT flag to Guest. + // SAFETY: trivially safe let result = unsafe { (ctx.cpuid)(entry.function) }; entry.cpuid.edx |= result.edx & (1 << EDX_HTT_SHIFT); return; @@ -164,9 +165,13 @@ pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) { 2 | // Cache and TLB Descriptor information 0x80000002 | 0x80000003 | 0x80000004 | // Processor Brand String 0x80000005 | 0x80000006 // L1 and L2 cache information - => entry.cpuid = unsafe { (ctx.cpuid)(entry.function) }, + => entry.cpuid = { + // SAFETY: trivially safe + unsafe { (ctx.cpuid)(entry.function) }}, 4 => { - entry.cpuid = unsafe { (ctx.cpuid_count)(entry.function, entry.index) }; + entry.cpuid = { + // SAFETY: trivially safe + unsafe { (ctx.cpuid_count)(entry.function, entry.index) }}; if ctx.cpu_config.host_cpu_topology { return; @@ -185,9 +190,11 @@ pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) { } } 6 => { - // Safe because we pass 6 for this call and the host - // supports the `cpuid` instruction - let result = unsafe { (ctx.cpuid)(entry.function) }; + let result = { + // SAFETY: + // Safe because we pass 6 for this call and the host + // supports the `cpuid` instruction + unsafe { (ctx.cpuid)(entry.function) }}; if ctx.cpu_config.enable_hwp { entry.cpuid.eax |= result.eax & (1 << EAX_HWP_SHIFT); @@ -202,6 +209,7 @@ pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) { } 7 => { if ctx.cpu_config.host_cpu_topology && entry.index == 0 { + // SAFETY: // Safe because we pass 7 and 0 for this call and the host supports the // `cpuid` instruction let result = unsafe { (ctx.cpuid_count)(entry.function, entry.index) }; @@ 
-220,6 +228,7 @@ pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) { 0x1A => { // Hybrid information leaf. if ctx.cpu_config.host_cpu_topology { + // SAFETY: // Safe because we pass 0x1A for this call and the host supports the // `cpuid` instruction entry.cpuid = unsafe { (ctx.cpuid)(entry.function) }; @@ -355,6 +364,7 @@ const INTEL_EDX: u32 = u32::from_le_bytes([b'i', b'n', b'e', b'I']); const INTEL_ECX: u32 = u32::from_le_bytes([b'n', b't', b'e', b'l']); pub fn cpu_manufacturer() -> CpuManufacturer { + // SAFETY: // safe because MANUFACTURER_ID_FUNCTION is a well known cpuid function, // and we own the result value afterwards. let result = unsafe { __cpuid(MANUFACTURER_ID_FUNCTION) }; diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs index e55731512b..8689ac7820 100644 --- a/x86_64/src/lib.rs +++ b/x86_64/src/lib.rs @@ -2279,12 +2279,14 @@ impl CpuIdCall { pub fn check_host_hybrid_support(cpuid: &CpuIdCall) -> std::result::Result<(), HybridSupportError> { // CPUID.0H.EAX returns maximum input value for basic CPUID information. // + // SAFETY: // Safe because we pass 0 for this call and the host supports the // `cpuid` instruction. let mut cpuid_entry = unsafe { (cpuid.cpuid)(0x0) }; if cpuid_entry.eax < 0x1A { return Err(HybridSupportError::UnsupportedHostCpu); } + // SAFETY: // Safe because we pass 0x7 and 0 for this call and the host supports the // `cpuid` instruction. cpuid_entry = unsafe { (cpuid.cpuid_count)(0x7, 0) }; @@ -2296,6 +2298,7 @@ pub fn check_host_hybrid_support(cpuid: &CpuIdCall) -> std::result::Result<(), H // 0 is returned in all the registers. // For the CPU with hybrid support, its CPUID.1AH.EAX shouldn't be zero. // + // SAFETY: // Safe because we pass 0 for this call and the host supports the // `cpuid` instruction. 
cpuid_entry = unsafe { (cpuid.cpuid)(0x1A) }; diff --git a/x86_64/src/mpspec.rs b/x86_64/src/mpspec.rs index 96fb539a04..7a59e45690 100644 --- a/x86_64/src/mpspec.rs +++ b/x86_64/src/mpspec.rs @@ -75,6 +75,7 @@ fn bindgen_test_layout_mpf_intel() { concat!("Alignment of ", stringify!(mpf_intel)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).signature as *const _ as usize }, 0usize, concat!( @@ -85,6 +86,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).physptr as *const _ as usize }, 4usize, concat!( @@ -95,6 +97,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).length as *const _ as usize }, 8usize, concat!( @@ -105,6 +108,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).specification as *const _ as usize }, 9usize, concat!( @@ -115,6 +119,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).checksum as *const _ as usize }, 10usize, concat!( @@ -125,6 +130,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).feature1 as *const _ as usize }, 11usize, concat!( @@ -135,6 +141,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).feature2 as *const _ as usize }, 12usize, concat!( @@ -145,6 +152,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).feature3 as *const _ as usize }, 13usize, concat!( @@ -155,6 +163,7 @@ fn bindgen_test_layout_mpf_intel() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).feature4 as *const _ as usize }, 14usize, concat!( @@ -165,6 +174,7 @@ fn bindgen_test_layout_mpf_intel() { 
) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpf_intel)).feature5 as *const _ as usize }, 15usize, concat!( @@ -208,6 +218,7 @@ fn bindgen_test_layout_mpc_table() { concat!("Alignment of ", stringify!(mpc_table)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).signature as *const _ as usize }, 0usize, concat!( @@ -218,6 +229,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).length as *const _ as usize }, 4usize, concat!( @@ -228,6 +240,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).spec as *const _ as usize }, 6usize, concat!( @@ -238,6 +251,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).checksum as *const _ as usize }, 7usize, concat!( @@ -248,6 +262,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).oem as *const _ as usize }, 8usize, concat!( @@ -258,6 +273,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).productid as *const _ as usize }, 16usize, concat!( @@ -268,6 +284,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).oemptr as *const _ as usize }, 28usize, concat!( @@ -278,6 +295,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).oemsize as *const _ as usize }, 32usize, concat!( @@ -288,6 +306,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).oemcount as *const _ as usize }, 34usize, concat!( @@ -298,6 +317,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const 
mpc_table)).lapic as *const _ as usize }, 36usize, concat!( @@ -308,6 +328,7 @@ fn bindgen_test_layout_mpc_table() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_table)).reserved as *const _ as usize }, 40usize, concat!( @@ -347,6 +368,7 @@ fn bindgen_test_layout_mpc_cpu() { concat!("Alignment of ", stringify!(mpc_cpu)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).type_ as *const _ as usize }, 0usize, concat!( @@ -357,6 +379,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).apicid as *const _ as usize }, 1usize, concat!( @@ -367,6 +390,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).apicver as *const _ as usize }, 2usize, concat!( @@ -377,6 +401,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).cpuflag as *const _ as usize }, 3usize, concat!( @@ -387,6 +412,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).cpufeature as *const _ as usize }, 4usize, concat!( @@ -397,6 +423,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).featureflag as *const _ as usize }, 8usize, concat!( @@ -407,6 +434,7 @@ fn bindgen_test_layout_mpc_cpu() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_cpu)).reserved as *const _ as usize }, 12usize, concat!( @@ -442,6 +470,7 @@ fn bindgen_test_layout_mpc_bus() { concat!("Alignment of ", stringify!(mpc_bus)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_bus)).type_ as *const _ as usize }, 0usize, concat!( @@ -452,6 +481,7 @@ fn bindgen_test_layout_mpc_bus() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_bus)).busid as *const _ as usize }, 1usize, concat!( @@ 
-462,6 +492,7 @@ fn bindgen_test_layout_mpc_bus() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_bus)).bustype as *const _ as usize }, 2usize, concat!( @@ -499,6 +530,7 @@ fn bindgen_test_layout_mpc_ioapic() { concat!("Alignment of ", stringify!(mpc_ioapic)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_ioapic)).type_ as *const _ as usize }, 0usize, concat!( @@ -509,6 +541,7 @@ fn bindgen_test_layout_mpc_ioapic() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_ioapic)).apicid as *const _ as usize }, 1usize, concat!( @@ -519,6 +552,7 @@ fn bindgen_test_layout_mpc_ioapic() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_ioapic)).apicver as *const _ as usize }, 2usize, concat!( @@ -529,6 +563,7 @@ fn bindgen_test_layout_mpc_ioapic() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_ioapic)).flags as *const _ as usize }, 3usize, concat!( @@ -539,6 +574,7 @@ fn bindgen_test_layout_mpc_ioapic() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_ioapic)).apicaddr as *const _ as usize }, 4usize, concat!( @@ -578,6 +614,7 @@ fn bindgen_test_layout_mpc_intsrc() { concat!("Alignment of ", stringify!(mpc_intsrc)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).type_ as *const _ as usize }, 0usize, concat!( @@ -588,6 +625,7 @@ fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).irqtype as *const _ as usize }, 1usize, concat!( @@ -598,6 +636,7 @@ fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).irqflag as *const _ as usize }, 2usize, concat!( @@ -608,6 +647,7 @@ fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).srcbus as *const _ as usize }, 4usize, concat!( @@ -618,6 +658,7 @@ 
fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).srcbusirq as *const _ as usize }, 5usize, concat!( @@ -628,6 +669,7 @@ fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).dstapic as *const _ as usize }, 6usize, concat!( @@ -638,6 +680,7 @@ fn bindgen_test_layout_mpc_intsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_intsrc)).dstirq as *const _ as usize }, 7usize, concat!( @@ -682,6 +725,7 @@ fn bindgen_test_layout_mpc_lintsrc() { concat!("Alignment of ", stringify!(mpc_lintsrc)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).type_ as *const _ as usize }, 0usize, concat!( @@ -692,6 +736,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).irqtype as *const _ as usize }, 1usize, concat!( @@ -702,6 +747,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).irqflag as *const _ as usize }, 2usize, concat!( @@ -712,6 +758,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).srcbusid as *const _ as usize }, 4usize, concat!( @@ -722,6 +769,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).srcbusirq as *const _ as usize }, 5usize, concat!( @@ -732,6 +780,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).destapic as *const _ as usize }, 6usize, concat!( @@ -742,6 +791,7 @@ fn bindgen_test_layout_mpc_lintsrc() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_lintsrc)).destapiclint as *const _ as usize }, 7usize, concat!( @@ -779,6 +829,7 @@ fn 
bindgen_test_layout_mpc_oemtable() { concat!("Alignment of ", stringify!(mpc_oemtable)) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_oemtable)).signature as *const _ as usize }, 0usize, concat!( @@ -789,6 +840,7 @@ fn bindgen_test_layout_mpc_oemtable() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_oemtable)).length as *const _ as usize }, 4usize, concat!( @@ -799,6 +851,7 @@ fn bindgen_test_layout_mpc_oemtable() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_oemtable)).rev as *const _ as usize }, 6usize, concat!( @@ -809,6 +862,7 @@ fn bindgen_test_layout_mpc_oemtable() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_oemtable)).checksum as *const _ as usize }, 7usize, concat!( @@ -819,6 +873,7 @@ fn bindgen_test_layout_mpc_oemtable() { ) ); assert_eq!( + // SAFETY: trivially safe unsafe { &(*(0 as *const mpc_oemtable)).mpc as *const _ as usize }, 8usize, concat!( diff --git a/x86_64/src/smbios.rs b/x86_64/src/smbios.rs index 4aa2257b3e..87ec52d095 100644 --- a/x86_64/src/smbios.rs +++ b/x86_64/src/smbios.rs @@ -70,6 +70,7 @@ const DEFAULT_SMBIOS_MANUFACTURER: &str = "ChromiumOS"; const DEFAULT_SMBIOS_PRODUCT_NAME: &str = "crosvm"; fn compute_checksum(v: &T) -> u8 { + // SAFETY: // Safe because we are only reading the bytes within the size of the `T` reference `v`. let v_slice = unsafe { slice::from_raw_parts(v as *const T as *const u8, mem::size_of::()) }; let mut checksum: u8 = 0;