devices: allow virtio-wayland to use virtgpu resources

This change uses the resource bridge between virtio-gpu and
virtio-wayland to send resources that originated from the virtio-gpu
device over the host wayland connection. This will help support
GPU-accelerated wayland surfaces.
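
For intuition, here is a minimal, self-contained model of the round trip this enables. It is
a sketch only: std mpsc channels and a plain integer stand in for the msg_socket-based socket
pair and the exported dma-buf File that the actual code in this change uses, and the enum
names merely mirror the new resource_bridge module below.

// Hypothetical model, not crosvm code.
use std::sync::mpsc;

enum ResourceRequest {
    GetResource { id: u32 },
}

enum ResourceResponse {
    Resource(i32), // stand-in for the exported plane fd (a File in the real protocol)
    Invalid,
}

fn main() {
    // resource_bridge::pair(): one end goes to virtio-wayland, the other to virtio-gpu.
    let (req_tx, req_rx) = mpsc::channel::<ResourceRequest>();
    let (resp_tx, resp_rx) = mpsc::channel::<ResourceResponse>();

    // virtio-wayland side: the guest sent VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID naming a
    // virtgpu resource id, so ask the gpu device for the backing fd.
    req_tx.send(ResourceRequest::GetResource { id: 42 }).unwrap();

    // virtio-gpu side: look up the resource and export a plane fd (stubbed out here).
    let id = match req_rx.recv().unwrap() {
        ResourceRequest::GetResource { id } => id,
    };
    let response = if id == 42 {
        ResourceResponse::Resource(7)
    } else {
        ResourceResponse::Invalid
    };
    resp_tx.send(response).unwrap();

    // virtio-wayland side: splice the received fd into the message for the host compositor,
    // or report an invalid id back to the guest.
    match resp_rx.recv().unwrap() {
        ResourceResponse::Resource(fd) => println!("forward fd {} over the wayland socket", fd),
        ResourceResponse::Invalid => println!("reply VIRTIO_WL_RESP_INVALID_ID to the guest"),
    }
}

The point of the bridge is that the wayland device cannot resolve virtio-gpu resource ids on
its own, so it asks the gpu device for a file descriptor it can forward to the host compositor.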

BUG=chromium:875998
TEST=wayland-simple-egl

Change-Id: I3340ecef438779be5cb3643b2de8bb8c33097d75
Reviewed-on: https://chromium-review.googlesource.com/1182793
Commit-Ready: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Tested-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Zach Reizner 2018-08-15 10:46:32 -07:00 committed by chrome-bot
parent 42c409c4d7
commit aa5756669a
10 changed files with 360 additions and 124 deletions

Cargo.lock (generated)

@@ -146,6 +146,7 @@ dependencies = [
"io_jail 0.1.0",
"kvm 0.1.0",
"libc 0.2.44 (registry+https://github.com/rust-lang/crates.io-index)",
"msg_on_socket_derive 0.1.0",
"msg_socket 0.1.0",
"net_sys 0.1.0",
"net_util 0.1.0",


@@ -15,8 +15,9 @@ gpu_display = { path = "../gpu_display", optional = true }
gpu_renderer = { path = "../gpu_renderer", optional = true }
kvm = { path = "../kvm" }
libc = "*"
msg_socket = { path = "../msg_socket" }
io_jail = { path = "../io_jail" }
msg_on_socket_derive = { path = "../msg_socket/msg_on_socket_derive" }
msg_socket = { path = "../msg_socket" }
net_sys = { path = "../net_sys" }
net_util = { path = "../net_util" }
p9 = { path = "../p9" }


@@ -9,18 +9,18 @@ extern crate data_model;
extern crate io_jail;
extern crate kvm;
extern crate libc;
extern crate msg_on_socket_derive;
extern crate msg_socket;
extern crate net_sys;
extern crate net_util;
extern crate p9;
extern crate resources;
extern crate sync;
#[macro_use]
extern crate sys_util;
extern crate vhost;
extern crate virtio_sys;
extern crate vm_control;
#[macro_use]
extern crate msg_socket;
extern crate sync;
mod bus;
mod cmos;


@@ -13,6 +13,7 @@ use std::usize;
use data_model::*;
use msg_socket::{MsgReceiver, MsgSender};
use sys_util::{GuestAddress, GuestMemory};
use super::gpu_buffer::{Buffer, Device, Flags, Format};
@@ -22,6 +23,7 @@ use super::gpu_renderer::{
Renderer, Resource as GpuRendererResource, ResourceCreateArgs,
};
use super::super::resource_bridge::*;
use super::protocol::GpuResponse;
use super::protocol::{VIRTIO_GPU_CAPSET_VIRGL, VIRTIO_GPU_CAPSET_VIRGL2};
@@ -349,6 +351,30 @@ impl Backend {
.unwrap_or(false)
}
pub fn process_resource_bridge(&self, resource_bridge: &ResourceResponseSocket) {
let request = match resource_bridge.recv() {
Ok(msg) => msg,
Err(e) => {
error!("error receiving resource bridge request: {:?}", e);
return;
}
};
let response = match request {
ResourceRequest::GetResource { id } => self
.resources
.get(&id)
.and_then(|resource| resource.buffer())
.and_then(|buffer| buffer.export_plane_fd(0).ok())
.map(|fd| ResourceResponse::Resource(fd))
.unwrap_or(ResourceResponse::Invalid),
};
if let Err(e) = resource_bridge.send(&response) {
error!("error sending resource bridge request: {:?}", e);
}
}
/// Gets the list of supported display resolutions as a slice of `(width, height)` tuples.
pub fn display_info(&self) -> &[(u32, u32)] {
&[(DEFAULT_WIDTH, DEFAULT_HEIGHT)]


@@ -30,7 +30,8 @@ use self::gpu_display::*;
use self::gpu_renderer::{format_fourcc, Renderer};
use super::{
AvailIter, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_GPU, VIRTIO_F_VERSION_1,
resource_bridge::*, AvailIter, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_GPU,
VIRTIO_F_VERSION_1,
};
use self::backend::Backend;
@@ -89,6 +90,10 @@ impl Frontend {
self.backend.process_display()
}
fn process_resource_bridge(&self, resource_bridge: &ResourceResponseSocket) {
self.backend.process_resource_bridge(resource_bridge);
}
fn process_gpu_command(
&mut self,
mem: &GuestMemory,
@@ -462,6 +467,7 @@ struct Worker {
ctrl_evt: EventFd,
cursor_queue: Queue,
cursor_evt: EventFd,
resource_bridge: Option<ResourceResponseSocket>,
kill_evt: EventFd,
state: Frontend,
}
@@ -479,6 +485,7 @@ impl Worker {
CtrlQueue,
CursorQueue,
Display,
ResourceBridge,
InterruptResample,
Kill,
}
@@ -501,6 +508,12 @@ impl Worker {
}
};
if let Some(ref resource_bridge) = self.resource_bridge {
if let Err(e) = poll_ctx.add(resource_bridge, Token::ResourceBridge) {
error!("failed to add resource bridge to PollContext: {:?}", e);
}
}
'poll: loop {
// If there are outstanding fences, wake up early to poll them.
let duration = if !self.state.fence_descriptors.is_empty() {
@@ -517,6 +530,7 @@
}
};
let mut signal_used = false;
let mut process_resource_bridge = false;
for event in events.iter_readable() {
match event.token() {
Token::CtrlQueue => {
@@ -535,6 +549,7 @@
let _ = self.exit_evt.write(1);
}
}
Token::ResourceBridge => process_resource_bridge = true,
Token::InterruptResample => {
let _ = self.interrupt_resample_evt.read();
if self.interrupt_status.load(Ordering::SeqCst) != 0 {
@@ -570,6 +585,15 @@
}
}
// Process the entire control queue before the resource bridge in case a resource is
// created or destroyed by the control queue. Processing the resource bridge first may
// lead to a race condition.
if process_resource_bridge {
if let Some(ref resource_bridge) = self.resource_bridge {
self.state.process_resource_bridge(resource_bridge);
}
}
if signal_used {
self.signal_used_queue();
}
@@ -580,15 +604,21 @@
pub struct Gpu {
config_event: bool,
exit_evt: EventFd,
resource_bridge: Option<ResourceResponseSocket>,
kill_evt: Option<EventFd>,
wayland_socket_path: PathBuf,
}
impl Gpu {
pub fn new<P: AsRef<Path>>(exit_evt: EventFd, wayland_socket_path: P) -> Gpu {
pub fn new<P: AsRef<Path>>(
exit_evt: EventFd,
resource_bridge: Option<ResourceResponseSocket>,
wayland_socket_path: P,
) -> Gpu {
Gpu {
config_event: false,
exit_evt,
resource_bridge,
kill_evt: None,
wayland_socket_path: wayland_socket_path.as_ref().to_path_buf(),
}
@@ -695,6 +725,8 @@ impl VirtioDevice for Gpu {
};
self.kill_evt = Some(self_kill_evt);
let resource_bridge = self.resource_bridge.take();
let ctrl_queue = queues.remove(0);
let ctrl_evt = queue_evts.remove(0);
let cursor_queue = queues.remove(0);
@@ -744,6 +776,7 @@
ctrl_evt,
cursor_queue,
cursor_evt,
resource_bridge,
kill_evt,
state: Frontend::new(Backend::new(device, display, renderer)),
}.run()


@@ -17,6 +17,7 @@ mod virtio_pci_common_config;
mod virtio_pci_device;
mod wl;
pub mod resource_bridge;
pub mod vhost;
pub use self::balloon::*;


@@ -0,0 +1,30 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! This module defines the protocol between `virtio-wayland` and `virtio-gpu` for sharing resources
//! that are backed by file descriptors.
use std::fs::File;
use std::io::Result;
use msg_on_socket_derive::MsgOnSocket;
use msg_socket::MsgSocket;
#[derive(MsgOnSocket)]
pub enum ResourceRequest {
GetResource { id: u32 },
}
#[derive(MsgOnSocket)]
pub enum ResourceResponse {
Resource(File),
Invalid,
}
pub type ResourceRequestSocket = MsgSocket<ResourceRequest, ResourceResponse>;
pub type ResourceResponseSocket = MsgSocket<ResourceResponse, ResourceRequest>;
pub fn pair() -> Result<(ResourceRequestSocket, ResourceResponseSocket)> {
msg_socket::pair()
}


@@ -67,6 +67,7 @@ use sys_util::{
#[cfg(feature = "wl-dmabuf")]
use sys_util::ioctl_with_ref;
use super::resource_bridge::*;
use super::{
DescriptorChain, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_WL, VIRTIO_F_VERSION_1,
};
@@ -84,6 +85,8 @@ const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "wl-dmabuf")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "wl-dmabuf")]
@@ -123,6 +126,9 @@ struct dma_buf_sync {
#[cfg(feature = "wl-dmabuf")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
fn parse_new(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
const ID_OFFSET: u64 = 8;
const FLAGS_OFFSET: u64 = 12;
@@ -214,7 +220,7 @@ fn parse_dmabuf_sync(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
})
}
fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp> {
fn parse_send(addr: GuestAddress, len: u32, foreign_id: bool, mem: &GuestMemory) -> WlResult<WlOp> {
const ID_OFFSET: u64 = 8;
const VFD_COUNT_OFFSET: u64 = 12;
const VFDS_OFFSET: u64 = 16;
@@ -231,15 +237,21 @@ fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp>
let vfds_addr = mem
.checked_offset(addr, VFDS_OFFSET)
.ok_or(WlError::CheckedOffset)?;
let vfds_element_size = if foreign_id {
size_of::<CtrlVfdSendVfd>()
} else {
size_of::<Le32>()
} as u32;
let data_addr = mem
.checked_offset(vfds_addr, (vfd_count * 4) as u64)
.checked_offset(vfds_addr, (vfd_count * vfds_element_size) as u64)
.ok_or(WlError::CheckedOffset)?;
Ok(WlOp::Send {
id: id.into(),
foreign_id,
vfds_addr,
vfd_count,
data_addr,
data_len: len - (VFDS_OFFSET as u32) - vfd_count * 4,
data_len: len - (VFDS_OFFSET as u32) - vfd_count * vfds_element_size,
})
}
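
For reference, a standalone sketch of the data_len arithmetic above (hypothetical helper
names; the struct mirrors CtrlVfdSendVfd from this file, with u32 standing in for Le32):

// Hypothetical, standalone illustration, not wl.rs code: with foreign_id each vfd entry is a
// (kind, id) pair of 4-byte words instead of a single vfd id, so the payload starts later and
// data_len shrinks accordingly.
use std::mem::size_of;

#[allow(dead_code)]
#[repr(C)]
struct CtrlVfdSendVfd {
    kind: u32,
    id: u32,
}

const VFDS_OFFSET: u32 = 16;

fn data_len(len: u32, vfd_count: u32, foreign_id: bool) -> u32 {
    let vfds_element_size = if foreign_id {
        size_of::<CtrlVfdSendVfd>() as u32 // 8 bytes per entry
    } else {
        size_of::<u32>() as u32 // 4 bytes per entry
    };
    len - VFDS_OFFSET - vfd_count * vfds_element_size
}

fn main() {
    // Two vfd entries followed by 32 bytes of payload, in either encoding.
    assert_eq!(data_len(16 + 2 * 4 + 32, 2, false), 32);
    assert_eq!(data_len(16 + 2 * 8 + 32, 2, true), 32);
}
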
@@ -259,7 +271,7 @@ fn parse_desc(desc: &DescriptorChain, mem: &GuestMemory) -> WlResult<WlOp> {
VIRTIO_WL_CMD_VFD_CLOSE => Ok(WlOp::Close {
id: parse_id(desc.addr, mem)?,
}),
VIRTIO_WL_CMD_VFD_SEND => parse_send(desc.addr, desc.len, mem),
VIRTIO_WL_CMD_VFD_SEND => parse_send(desc.addr, desc.len, false, mem),
VIRTIO_WL_CMD_VFD_NEW_CTX => Ok(WlOp::NewCtx {
id: parse_id(desc.addr, mem)?,
}),
@@ -268,6 +280,8 @@ fn parse_desc(desc: &DescriptorChain, mem: &GuestMemory) -> WlResult<WlOp> {
VIRTIO_WL_CMD_VFD_NEW_DMABUF => parse_new_dmabuf(desc.addr, mem),
#[cfg(feature = "wl-dmabuf")]
VIRTIO_WL_CMD_VFD_DMABUF_SYNC => parse_dmabuf_sync(desc.addr, mem),
#[cfg(feature = "gpu")]
VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => parse_send(desc.addr, desc.len, true, mem),
v => Ok(WlOp::InvalidCommand { op_type: v }),
}
}
@@ -553,6 +567,15 @@ struct CtrlVfd {
unsafe impl DataInit for CtrlVfd {}
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdSendVfd {
kind: Le32,
id: Le32,
}
unsafe impl DataInit for CtrlVfdSendVfd {}
#[derive(Debug)]
enum WlOp {
NewAlloc {
@@ -565,6 +588,7 @@
},
Send {
id: u32,
foreign_id: bool,
vfds_addr: GuestAddress,
vfd_count: u32,
data_addr: GuestAddress,
@@ -966,6 +990,7 @@ enum WlRecv {
struct WlState {
wayland_path: PathBuf,
vm: VmRequester,
resource_bridge: Option<ResourceRequestSocket>,
use_transition_flags: bool,
poll_ctx: PollContext<u32>,
vfds: Map<u32, WlVfd>,
@@ -977,10 +1002,16 @@
}
impl WlState {
fn new(wayland_path: PathBuf, vm_socket: UnixDatagram, use_transition_flags: bool) -> WlState {
fn new(
wayland_path: PathBuf,
vm_socket: UnixDatagram,
use_transition_flags: bool,
resource_bridge: Option<ResourceRequestSocket>,
) -> WlState {
WlState {
wayland_path,
vm: VmRequester::new(vm_socket),
resource_bridge,
poll_ctx: PollContext::new().expect("failed to create PollContext"),
use_transition_flags,
vfds: Map::new(),
@@ -1179,21 +1210,96 @@ impl WlState {
}
}
fn send(&mut self, vfd_id: u32, vfds: VolatileSlice, data: VolatileSlice) -> WlResult<WlResp> {
let vfd_count = vfds.size() as usize / size_of::<Le32>();
let mut vfd_ids = [Le32::from(0); VIRTWL_SEND_MAX_ALLOCS];
vfds.copy_to(&mut vfd_ids[..]);
fn send(
&mut self,
vfd_id: u32,
foreign_id: bool,
vfds: VolatileSlice,
data: VolatileSlice,
) -> WlResult<WlResp> {
// First stage gathers and normalizes all id information from guest memory.
let mut send_vfd_ids = [CtrlVfdSendVfd::default(); VIRTWL_SEND_MAX_ALLOCS];
let vfd_count = if foreign_id {
vfds.copy_to(&mut send_vfd_ids[..]);
vfds.size() as usize / size_of::<CtrlVfdSendVfd>()
} else {
let vfd_count = vfds.size() as usize / size_of::<Le32>();
let mut vfd_ids = [Le32::from(0); VIRTWL_SEND_MAX_ALLOCS];
vfds.copy_to(&mut vfd_ids[..]);
send_vfd_ids[..vfd_count]
.iter_mut()
.zip(vfd_ids[..vfd_count].iter())
.for_each(|(send_vfd_id, &vfd_id)| {
*send_vfd_id = CtrlVfdSendVfd {
kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
id: vfd_id,
}
});
vfd_count
};
// Next stage collects corresponding file descriptors for each id.
let mut fds = [0; VIRTWL_SEND_MAX_ALLOCS];
for (&id, fd) in vfd_ids[..vfd_count].iter().zip(fds.iter_mut()) {
match self.vfds.get(&id.into()) {
Some(vfd) => match vfd.send_fd() {
Some(vfd_fd) => *fd = vfd_fd,
None => return Ok(WlResp::InvalidType),
#[cfg(feature = "gpu")]
let mut bridged_files = Vec::new();
for (&send_vfd_id, fd) in send_vfd_ids[..vfd_count].iter().zip(fds.iter_mut()) {
let id = send_vfd_id.id.to_native();
match send_vfd_id.kind.to_native() {
VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => match self.vfds.get(&id) {
Some(vfd) => match vfd.send_fd() {
Some(vfd_fd) => *fd = vfd_fd,
None => return Ok(WlResp::InvalidType),
},
None => {
warn!("attempt to send non-existant vfd 0x{:08x}", id);
return Ok(WlResp::InvalidId);
}
},
None => return Ok(WlResp::InvalidId),
#[cfg(feature = "gpu")]
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() =>
{
if let Err(e) = self
.resource_bridge
.as_ref()
.unwrap()
.send(&ResourceRequest::GetResource { id })
{
error!("error sending resource bridge request: {:?}", e);
return Ok(WlResp::InvalidId);
}
match self.resource_bridge.as_ref().unwrap().recv() {
Ok(ResourceResponse::Resource(bridged_file)) => {
*fd = bridged_file.as_raw_fd();
bridged_files.push(bridged_file);
}
Ok(ResourceResponse::Invalid) => {
warn!("attempt to send non-existant gpu resource {}", id);
return Ok(WlResp::InvalidId);
}
Err(e) => {
error!("error receiving resource bridge response: {:?}", e);
// If there was an error with the resource bridge, it can no longer be
// trusted to continue to function.
self.resource_bridge = None;
return Ok(WlResp::InvalidId);
}
}
}
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU => {
let _ = self.resource_bridge.as_ref();
warn!("attempt to send foreign resource kind but feature is disabled");
}
kind => {
warn!(
"attempt to send unknown foreign resource kind: {} id: {:08x}",
kind, id
);
return Ok(WlResp::InvalidId);
}
}
}
// Final stage sends file descriptors and data to the target vfd's socket.
match self.vfds.get_mut(&vfd_id) {
Some(vfd) => match vfd.send(&fds[..vfd_count], data)? {
WlResp::Ok => {}
@@ -1203,10 +1309,12 @@ impl WlState {
}
// The vfds with remote FDs need to be closed so that the local side can receive
// hangup events.
for &id in &vfd_ids[..vfd_count] {
// The following unwrap can not panic because the IDs were already checked earlier in
// this method.
self.vfds.get_mut(&id.into()).unwrap().close_remote();
for &send_vfd_id in &send_vfd_ids[..vfd_count] {
if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
self.vfds
.get_mut(&send_vfd_id.id.into())
.map(|vfd| vfd.close_remote());
}
}
Ok(WlResp::Ok)
}
@@ -1247,15 +1355,20 @@ impl WlState {
WlOp::Close { id } => self.close(id),
WlOp::Send {
id,
foreign_id,
vfds_addr,
vfd_count,
data_addr,
data_len,
} => {
let vfd_mem =
mem.get_slice(vfds_addr.0, (vfd_count as u64) * size_of::<Le32>() as u64)?;
let vfd_size = if foreign_id {
size_of::<CtrlVfdSendVfd>()
} else {
size_of::<Le32>()
} as u32;
let vfd_mem = mem.get_slice(vfds_addr.0, (vfd_count * vfd_size) as u64)?;
let data_mem = mem.get_slice(data_addr.0, data_len as u64)?;
self.send(id, vfd_mem, data_mem)
self.send(id, foreign_id, vfd_mem, data_mem)
}
WlOp::NewCtx { id } => self.new_context(id),
WlOp::NewPipe { id, flags } => self.new_pipe(id, flags),
@@ -1378,6 +1491,7 @@ impl Worker {
wayland_path: PathBuf,
vm_socket: UnixDatagram,
use_transition_flags: bool,
resource_bridge: Option<ResourceRequestSocket>,
) -> Worker {
Worker {
mem,
@@ -1386,7 +1500,12 @@
interrupt_status,
in_queue,
out_queue,
state: WlState::new(wayland_path, vm_socket, use_transition_flags),
state: WlState::new(
wayland_path,
vm_socket,
use_transition_flags,
resource_bridge,
),
in_desc_chains: VecDeque::with_capacity(QUEUE_SIZE as usize),
}
}
@@ -1561,17 +1680,21 @@ pub struct Wl {
kill_evt: Option<EventFd>,
wayland_path: PathBuf,
vm_socket: Option<UnixDatagram>,
resource_bridge: Option<ResourceRequestSocket>,
use_transition_flags: bool,
}
impl Wl {
pub fn new<P: AsRef<Path>>(wayland_path: P, vm_socket: UnixDatagram) -> Result<Wl> {
// let kill_evt = EventFd::new()?;
// workers_kill_evt: Some(kill_evt.try_clone()?),
pub fn new<P: AsRef<Path>>(
wayland_path: P,
vm_socket: UnixDatagram,
resource_bridge: Option<ResourceRequestSocket>,
) -> Result<Wl> {
Ok(Wl {
kill_evt: None,
wayland_path: wayland_path.as_ref().to_owned(),
vm_socket: Some(vm_socket),
resource_bridge,
use_transition_flags: false,
})
}
@@ -1640,6 +1763,7 @@ impl VirtioDevice for Wl {
if let Some(vm_socket) = self.vm_socket.take() {
let wayland_path = self.wayland_path.clone();
let use_transition_flags = self.use_transition_flags;
let resource_bridge = self.resource_bridge.take();
let worker_result =
thread::Builder::new()
.name("virtio_wl".to_string())
@@ -1654,6 +1778,7 @@
wayland_path,
vm_socket,
use_transition_flags,
resource_bridge,
).run(queue_evts, kill_evt);
});


@@ -14,7 +14,13 @@ macro_rules! fourcc {
/// unrecognized.
pub fn pipe_format_fourcc(f: p_format::pipe_format) -> Option<u32> {
match f {
p_format::PIPE_FORMAT_B8G8R8A8_UNORM => fourcc!('A', 'R', '2', '4'),
p_format::PIPE_FORMAT_B8G8R8X8_UNORM => fourcc!('X', 'R', '2', '4'),
p_format::PIPE_FORMAT_R8G8B8A8_UNORM => fourcc!('A', 'B', '2', '4'),
p_format::PIPE_FORMAT_R8G8B8X8_UNORM => fourcc!('X', 'B', '2', '4'),
// p_format::PIPE_FORMAT_B5G6R5_UNORM => fourcc!('R', 'G', '1', '6'),
// p_format::PIPE_FORMAT_R8_UNORM => fourcc!('R', '8', ' ', ' '),
// p_format::PIPE_FORMAT_G8R8_UNORM => fourcc!('R', 'G', '8', '8'),
_ => None,
}
}
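
For reference, a standalone sketch of the byte packing behind these codes (assuming the
fourcc! macro defined just above this hunk follows the usual DRM little-endian convention,
which is how 'AR24' corresponds to ARGB8888):

// Hypothetical helper, not gpu_renderer code: packs four ASCII bytes little-endian into a u32.
fn fourcc_code(a: u8, b: u8, c: u8, d: u8) -> u32 {
    (a as u32) | ((b as u32) << 8) | ((c as u32) << 16) | ((d as u32) << 24)
}

fn main() {
    assert_eq!(fourcc_code(b'A', b'R', b'2', b'4'), 0x3432_5241); // DRM_FORMAT_ARGB8888
    assert_eq!(fourcc_code(b'X', b'B', b'2', b'4'), 0x3432_4258); // DRM_FORMAT_XBGR8888
}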


@@ -382,106 +382,23 @@ fn create_virtio_devs(
}
}
if let Some(wayland_socket_path) = cfg.wayland_socket_path.as_ref() {
let wayland_socket_dir = wayland_socket_path
.parent()
.ok_or(Error::InvalidWaylandPath)?;
let wayland_socket_name = wayland_socket_path
.file_name()
.ok_or(Error::InvalidWaylandPath)?;
let jailed_wayland_dir = Path::new("/wayland");
let jailed_wayland_path = jailed_wayland_dir.join(wayland_socket_name);
let wl_box = Box::new(
devices::virtio::Wl::new(
if cfg.multiprocess {
&jailed_wayland_path
} else {
wayland_socket_path.as_path()
},
wayland_device_socket,
).map_err(Error::WaylandDeviceNew)?,
);
let jail = if cfg.multiprocess {
let policy_path: PathBuf = cfg.seccomp_policy_dir.join("wl_device.policy");
let mut jail = create_base_minijail(empty_root_path, &policy_path)?;
// Create a tmpfs in the device's root directory so that we can bind mount the wayland
// socket directory into it. The size=67108864 is 64 * 1024 * 1024 bytes, i.e. 64 MiB.
jail.mount_with_data(
Path::new("none"),
Path::new("/"),
"tmpfs",
(libc::MS_NOSUID | libc::MS_NODEV | libc::MS_NOEXEC) as usize,
"size=67108864",
).unwrap();
// Bind mount the wayland socket's directory into jail's root. This is necessary since
// each new wayland context must open() the socket. If the wayland socket is ever
// destroyed and remade in the same host directory, new connections will be possible
// without restarting the wayland device.
jail.mount_bind(wayland_socket_dir, jailed_wayland_dir, true)
.unwrap();
// Set the uid/gid for the jailed process, and give a basic id map. This
// is required for the above bind mount to work.
let crosvm_user_group = CStr::from_bytes_with_nul(b"crosvm\0").unwrap();
let crosvm_uid = match get_user_id(&crosvm_user_group) {
Ok(u) => u,
Err(e) => {
warn!("falling back to current user id for Wayland: {:?}", e);
geteuid()
}
};
let crosvm_gid = match get_group_id(&crosvm_user_group) {
Ok(u) => u,
Err(e) => {
warn!("falling back to current group id for Wayland: {:?}", e);
getegid()
}
};
jail.change_uid(crosvm_uid);
jail.change_gid(crosvm_gid);
jail.uidmap(&format!("{0} {0} 1", crosvm_uid))
.map_err(Error::SettingUidMap)?;
jail.gidmap(&format!("{0} {0} 1", crosvm_gid))
.map_err(Error::SettingGidMap)?;
Some(jail)
} else {
None
};
devs.push(VirtioDeviceStub { dev: wl_box, jail });
}
if let Some(cid) = cfg.cid {
let vsock_box = Box::new(
devices::virtio::vhost::Vsock::new(cid, &mem).map_err(Error::VhostVsockDeviceNew)?,
);
let jail = if cfg.multiprocess {
let policy_path: PathBuf = cfg.seccomp_policy_dir.join("vhost_vsock_device.policy");
Some(create_base_minijail(empty_root_path, &policy_path)?)
} else {
None
};
devs.push(VirtioDeviceStub {
dev: vsock_box,
jail,
});
}
#[cfg(feature = "gpu")]
let mut resource_bridge_wl_socket: Option<
devices::virtio::resource_bridge::ResourceRequestSocket,
> = None;
#[cfg(feature = "gpu")]
{
if cfg.gpu {
if let Some(wayland_socket_path) = cfg.wayland_socket_path.as_ref() {
let (wl_socket, gpu_socket) =
devices::virtio::resource_bridge::pair().map_err(Error::CreateSocket)?;
resource_bridge_wl_socket = Some(wl_socket);
let jailed_wayland_path = Path::new("/wayland-0");
let gpu_box = Box::new(devices::virtio::Gpu::new(
_exit_evt.try_clone().map_err(Error::CloneEventFd)?,
Some(gpu_socket),
if cfg.multiprocess {
&jailed_wayland_path
} else {
@@ -557,6 +474,102 @@ fn create_virtio_devs(
}
}
if let Some(wayland_socket_path) = cfg.wayland_socket_path.as_ref() {
let wayland_socket_dir = wayland_socket_path
.parent()
.ok_or(Error::InvalidWaylandPath)?;
let wayland_socket_name = wayland_socket_path
.file_name()
.ok_or(Error::InvalidWaylandPath)?;
let jailed_wayland_dir = Path::new("/wayland");
let jailed_wayland_path = jailed_wayland_dir.join(wayland_socket_name);
#[cfg(not(feature = "gpu"))]
let resource_bridge_wl_socket = None;
let wl_box = Box::new(
devices::virtio::Wl::new(
if cfg.multiprocess {
&jailed_wayland_path
} else {
wayland_socket_path.as_path()
},
wayland_device_socket,
resource_bridge_wl_socket,
).map_err(Error::WaylandDeviceNew)?,
);
let jail = if cfg.multiprocess {
let policy_path: PathBuf = cfg.seccomp_policy_dir.join("wl_device.policy");
let mut jail = create_base_minijail(empty_root_path, &policy_path)?;
// Create a tmpfs in the device's root directory so that we can bind mount the wayland
// socket directory into it. The size=67108864 is 64 * 1024 * 1024 bytes, i.e. 64 MiB.
jail.mount_with_data(
Path::new("none"),
Path::new("/"),
"tmpfs",
(libc::MS_NOSUID | libc::MS_NODEV | libc::MS_NOEXEC) as usize,
"size=67108864",
).unwrap();
// Bind mount the wayland socket's directory into jail's root. This is necessary since
// each new wayland context must open() the socket. If the wayland socket is ever
// destroyed and remade in the same host directory, new connections will be possible
// without restarting the wayland device.
jail.mount_bind(wayland_socket_dir, jailed_wayland_dir, true)
.unwrap();
// Set the uid/gid for the jailed process, and give a basic id map. This
// is required for the above bind mount to work.
let crosvm_user_group = CStr::from_bytes_with_nul(b"crosvm\0").unwrap();
let crosvm_uid = match get_user_id(&crosvm_user_group) {
Ok(u) => u,
Err(e) => {
warn!("falling back to current user id for Wayland: {:?}", e);
geteuid()
}
};
let crosvm_gid = match get_group_id(&crosvm_user_group) {
Ok(u) => u,
Err(e) => {
warn!("falling back to current group id for Wayland: {:?}", e);
getegid()
}
};
jail.change_uid(crosvm_uid);
jail.change_gid(crosvm_gid);
jail.uidmap(&format!("{0} {0} 1", crosvm_uid))
.map_err(Error::SettingUidMap)?;
jail.gidmap(&format!("{0} {0} 1", crosvm_gid))
.map_err(Error::SettingGidMap)?;
Some(jail)
} else {
None
};
devs.push(VirtioDeviceStub { dev: wl_box, jail });
}
if let Some(cid) = cfg.cid {
let vsock_box = Box::new(
devices::virtio::vhost::Vsock::new(cid, &mem).map_err(Error::VhostVsockDeviceNew)?,
);
let jail = if cfg.multiprocess {
let policy_path: PathBuf = cfg.seccomp_policy_dir.join("vhost_vsock_device.policy");
Some(create_base_minijail(empty_root_path, &policy_path)?)
} else {
None
};
devs.push(VirtioDeviceStub {
dev: vsock_box,
jail,
});
}
let chronos_user_group = CStr::from_bytes_with_nul(b"chronos\0").unwrap();
let chronos_uid = match get_user_id(&chronos_user_group) {
Ok(u) => u,