gpu_display: refactor event loop

There is a desire for Wayland and possibly other display backends to
reasonably handle input.  Move the event device logic from the X11
backend up to the common layer to prevent duplication.

The common layer also keeps track of surfaces and external memory
objects to make this easier.  The GpuDisplaySurface/GpuDisplayImport
traits are introduced in case the common layer needs to perform
compositor-specific operations.

BUG=b:173630595
TEST=compile and run with X11

Change-Id: Ied060a7cc216ac6c084030aad1fc839c022a3395
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2852523
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Commit-Queue: Gurchetan Singh <gurchetansingh@chromium.org>
Gurchetan Singh 2021-04-26 16:40:48 -07:00 committed by Commit Bot
parent eca117b31e
commit 41daa354c7
9 changed files with 858 additions and 830 deletions
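
At a glance, the refactor swaps per-backend bookkeeping for two traits plus a common layer that owns the id maps and the event loop. A condensed sketch of the new shape, with names taken from the hunks below and most bodies elided:

trait GpuDisplaySurface {
    // The only required method; every other trait method has a no-op default.
    fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>;
    fn flip(&mut self) {}
    fn close_requested(&self) -> bool {
        false
    }
    // ...
}

// Marker trait for backend-owned external memory (e.g. Wayland dmabufs).
trait GpuDisplayImport {}

pub struct GpuDisplay {
    inner: Box<dyn DisplayT>, // compositor-specific backend
    next_id: u32,
    event_devices: BTreeMap<u32, EventDevice>, // moved up from the X11 backend
    surfaces: BTreeMap<u32, Box<dyn GpuDisplaySurface>>,
    imports: BTreeMap<u32, Box<dyn GpuDisplayImport>>,
    wait_ctx: WaitContext<DisplayPollToken>, // common event loop
    is_x: bool,
}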

View file

@@ -10,7 +10,7 @@ use std::result::Result;
use std::sync::Arc;
use crate::virtio::resource_bridge::{BufferInfo, PlaneInfo, ResourceInfo, ResourceResponse};
use base::{error, AsRawDescriptor, ExternalMapping, Tube};
use base::{error, ExternalMapping, Tube};
use data_model::VolatileSlice;
@@ -181,9 +181,6 @@ impl VirtioGpu {
let mut display = self.display.borrow_mut();
let event_device_id = display.import_event_device(event_device)?;
if let Some(s) = self.scanout_surface_id {
display.attach_event_device(s, event_device_id)
}
self.event_devices.insert(event_device_id, scanout);
Ok(OkNoData)
}
@@ -201,7 +198,12 @@ impl VirtioGpu {
/// Processes the internal `display` events and returns `true` if the main display was closed.
pub fn process_display(&mut self) -> bool {
let mut display = self.display.borrow_mut();
display.dispatch_events();
let result = display.dispatch_events();
match result {
Ok(_) => (),
Err(e) => error!("failed to dispatch events: {}", e),
}
self.scanout_surface_id
.map(|s| display.close_requested(s))
.unwrap_or(false)
@@ -234,9 +236,6 @@ impl VirtioGpu {
let surface_id =
display.create_surface(None, self.display_width, self.display_height)?;
self.scanout_surface_id = Some(surface_id);
for event_device_id in self.event_devices.keys() {
display.attach_event_device(surface_id, *event_device_id);
}
}
Ok(OkNoData)
}
@@ -299,8 +298,8 @@ impl VirtioGpu {
),
};
match self.display.borrow_mut().import_dmabuf(
dmabuf.os_handle.as_raw_descriptor(),
match self.display.borrow_mut().import_memory(
&dmabuf.os_handle,
offset,
stride,
query.modifier,
@@ -327,7 +326,7 @@ impl VirtioGpu {
surface_id: u32,
) -> VirtioGpuResult {
if let Some(import_id) = self.import_to_display(resource_id) {
self.display.borrow_mut().flip_to(surface_id, import_id);
self.display.borrow_mut().flip_to(surface_id, import_id)?;
return Ok(OkNoData);
}
@@ -385,13 +384,13 @@ impl VirtioGpu {
let cursor_surface_id = self.cursor_surface_id.unwrap();
self.display
.borrow_mut()
.set_position(cursor_surface_id, x, y);
.set_position(cursor_surface_id, x, y)?;
// Gets the resource's pixels into the display by importing the buffer.
if let Some(import_id) = self.import_to_display(resource_id) {
self.display
.borrow_mut()
.flip_to(cursor_surface_id, import_id);
.flip_to(cursor_surface_id, import_id)?;
return Ok(OkNoData);
}
@@ -412,8 +411,8 @@ impl VirtioGpu {
if let Some(cursor_surface_id) = self.cursor_surface_id {
if let Some(scanout_surface_id) = self.scanout_surface_id {
let mut display = self.display.borrow_mut();
display.set_position(cursor_surface_id, x, y);
display.commit(scanout_surface_id);
display.set_position(cursor_surface_id, x, y)?;
display.commit(scanout_surface_id)?;
}
}
Ok(OkNoData)

View file

@@ -8,8 +8,8 @@ fn main() {
let mut disp = GpuDisplay::open_wayland(None::<&str>).unwrap();
let surface_id = disp.create_surface(None, 1280, 1024).unwrap();
disp.flip(surface_id);
disp.commit(surface_id);
disp.commit(surface_id).unwrap();
while !disp.close_requested(surface_id) {
disp.dispatch_events();
disp.dispatch_events().unwrap();
}
}
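
With both hunks applied, the whole example reads roughly as follows (reassembled from the diff; the glob import is assumed). The point of the change is visible in the two new unwrap calls: commit and dispatch_events are now fallible.

use gpu_display::*;

fn main() {
    let mut disp = GpuDisplay::open_wayland(None::<&str>).unwrap();
    let surface_id = disp.create_surface(None, 1280, 1024).unwrap();
    disp.flip(surface_id);
    disp.commit(surface_id).unwrap();
    while !disp.close_requested(surface_id) {
        disp.dispatch_events().unwrap();
    }
}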

View file

@@ -20,6 +20,6 @@ fn main() {
disp.flip(surface_id);
while !disp.close_requested(surface_id) {
disp.dispatch_events();
disp.dispatch_events().unwrap();
}
}

View file

@@ -58,6 +58,8 @@ struct output {
struct dwl_context {
struct wl_display *display;
struct dwl_surface *surfaces[MAX_BUFFER_COUNT];
struct dwl_dmabuf *dmabufs[MAX_BUFFER_COUNT];
struct interfaces ifaces;
bool output_added;
struct output outputs[8];
@@ -71,8 +73,10 @@ struct dwl_context {
struct dwl_dmabuf {
uint32_t width;
uint32_t height;
uint32_t import_id;
bool in_use;
struct wl_buffer *buffer;
struct dwl_context *context;
};
struct dwl_surface {
@@ -85,6 +89,7 @@ struct dwl_surface {
struct wl_subsurface *subsurface;
uint32_t width;
uint32_t height;
uint32_t surface_id;
double scale;
bool close_requested;
size_t buffer_count;
@@ -499,10 +504,52 @@ static void dmabuf_buffer_release(void *data, struct wl_buffer *buffer)
static const struct wl_buffer_listener dmabuf_buffer_listener = {
.release = dmabuf_buffer_release};
struct dwl_dmabuf *dwl_context_dmabuf_new(struct dwl_context *self, int fd,
uint32_t offset, uint32_t stride,
uint64_t modifiers, uint32_t width,
uint32_t height, uint32_t fourcc)
static bool dwl_context_add_dmabuf(struct dwl_context *self,
struct dwl_dmabuf *dmabuf)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (!self->dmabufs[i]) {
self->dmabufs[i] = dmabuf;
return true;
}
}
return false;
}
static void dwl_context_remove_dmabuf(struct dwl_context *self,
uint32_t import_id)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (self->dmabufs[i] &&
self->dmabufs[i]->import_id == import_id) {
self->dmabufs[i] = NULL;
}
}
}
static struct dwl_dmabuf *dwl_context_get_dmabuf(struct dwl_context *self,
uint32_t import_id)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (self->dmabufs[i] &&
self->dmabufs[i]->import_id == import_id) {
return self->dmabufs[i];
}
}
return NULL;
}
struct dwl_dmabuf *dwl_context_dmabuf_new(struct dwl_context *self,
uint32_t import_id,
int fd, uint32_t offset,
uint32_t stride, uint64_t modifier,
uint32_t width, uint32_t height,
uint32_t fourcc)
{
struct dwl_dmabuf *dmabuf = calloc(1, sizeof(struct dwl_dmabuf));
if (!dmabuf) {
@@ -524,8 +571,8 @@ struct dwl_dmabuf *dwl_context_dmabuf_new(struct dwl_context *self, int fd,
zwp_linux_buffer_params_v1_add_listener(params, &linux_buffer_listener,
dmabuf);
zwp_linux_buffer_params_v1_add(params, fd, 0 /* plane_idx */, offset,
stride, modifiers >> 32,
(uint32_t)modifiers);
stride, modifier >> 32,
(uint32_t)modifier);
zwp_linux_buffer_params_v1_create(params, width, height, fourcc, 0);
wl_display_roundtrip(self->display);
zwp_linux_buffer_params_v1_destroy(params);
@@ -538,11 +585,20 @@ struct dwl_dmabuf *dwl_context_dmabuf_new(struct dwl_context *self, int fd,
wl_buffer_add_listener(dmabuf->buffer, &dmabuf_buffer_listener, dmabuf);
dmabuf->import_id = import_id;
dmabuf->context = self;
if (!dwl_context_add_dmabuf(self, dmabuf)) {
syslog(LOG_ERR, "failed to add dmabuf to context");
free(dmabuf);
return NULL;
}
return dmabuf;
}
void dwl_dmabuf_destroy(struct dwl_dmabuf **self)
{
dwl_context_remove_dmabuf((*self)->context, (*self)->import_id);
wl_buffer_destroy((*self)->buffer);
free(*self);
*self = NULL;
@@ -565,8 +621,49 @@ static void surface_buffer_release(void *data, struct wl_buffer *buffer)
static const struct wl_buffer_listener surface_buffer_listener = {
.release = surface_buffer_release};
static struct dwl_surface *dwl_context_get_surface(struct dwl_context *self,
uint32_t surface_id)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (self->surfaces[i] &&
self->surfaces[i]->surface_id == surface_id) {
return self->surfaces[i];
}
}
return NULL;
}
static bool dwl_context_add_surface(struct dwl_context *self,
struct dwl_surface *surface)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (!self->surfaces[i]) {
self->surfaces[i] = surface;
return true;
}
}
return false;
}
static void dwl_context_remove_surface(struct dwl_context *self,
uint32_t surface_id)
{
size_t i;
for (i = 0; i < MAX_BUFFER_COUNT; i++) {
if (self->surfaces[i] &&
self->surfaces[i]->surface_id == surface_id) {
self->surfaces[i] = NULL;
}
}
}
struct dwl_surface *dwl_context_surface_new(struct dwl_context *self,
struct dwl_surface *parent,
uint32_t parent_id,
uint32_t surface_id,
int shm_fd, size_t shm_size,
size_t buffer_size, uint32_t width,
uint32_t height, uint32_t stride)
@@ -632,7 +729,7 @@ struct dwl_surface *dwl_context_surface_new(struct dwl_context *self,
wl_region_add(region, 0, 0, width, height);
wl_surface_set_opaque_region(disp_surface->wl_surface, region);
if (!parent) {
if (!parent_id) {
disp_surface->xdg_surface = xdg_wm_base_get_xdg_surface(
self->ifaces.xdg_wm_base, disp_surface->wl_surface);
if (!disp_surface->xdg_surface) {
@@ -672,9 +769,17 @@ struct dwl_surface *dwl_context_surface_new(struct dwl_context *self,
// wait for the surface to be configured
wl_display_roundtrip(self->display);
} else {
struct dwl_surface *parent_surface =
dwl_context_get_surface(self, parent_id);
if (!parent_surface) {
syslog(LOG_ERR, "failed to find parent_surface");
goto fail;
}
disp_surface->subsurface = wl_subcompositor_get_subsurface(
self->ifaces.subcompositor, disp_surface->wl_surface,
parent->wl_surface);
parent_surface->wl_surface);
if (!disp_surface->subsurface) {
syslog(LOG_ERR, "failed to make subsurface");
goto fail;
@@ -716,6 +821,12 @@ struct dwl_surface *dwl_context_surface_new(struct dwl_context *self,
wl_surface_commit(disp_surface->wl_surface);
wl_display_flush(self->display);
disp_surface->surface_id = surface_id;
if (!dwl_context_add_surface(self, disp_surface)) {
syslog(LOG_ERR, "failed to add surface to context");
goto fail;
}
return disp_surface;
fail:
if (disp_surface->viewport)
@@ -744,6 +855,8 @@ fail:
void dwl_surface_destroy(struct dwl_surface **self)
{
size_t i;
dwl_context_remove_surface((*self)->context, (*self)->surface_id);
if ((*self)->viewport)
wp_viewport_destroy((*self)->viewport);
if ((*self)->subsurface)
@@ -790,8 +903,14 @@ void dwl_surface_flip(struct dwl_surface *self, size_t buffer_index)
self->buffer_use_bit_mask |= 1 << buffer_index;
}
void dwl_surface_flip_to(struct dwl_surface *self, struct dwl_dmabuf *dmabuf)
void dwl_surface_flip_to(struct dwl_surface *self, uint32_t import_id)
{
// Surface and dmabuf have to exist in same context.
struct dwl_dmabuf *dmabuf = dwl_context_get_dmabuf(self->context,
import_id);
if (!dmabuf)
return;
if (self->width != dmabuf->width || self->height != dmabuf->height)
return;
wl_surface_attach(self->wl_surface, dmabuf->buffer, 0, 0);
@@ -814,3 +933,8 @@ void dwl_surface_set_position(struct dwl_surface *self, uint32_t x, uint32_t y)
wl_display_flush(self->context->display);
}
}
void *dwl_surface_descriptor(struct dwl_surface *self)
{
return self->wl_surface;
}
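
On the Rust side, these C changes mean a caller-assigned import_id crosses the FFI boundary once at registration time, and buffers are referred to only by id afterwards. A sketch of the flow, assuming the dwl bindings declared in the next file (the helper itself is hypothetical):

// Hypothetical helper; every parameter is supplied by the common layer.
unsafe fn present_imported_buffer(
    ctx: *mut dwl_context,
    surface: *mut dwl_surface,
    import_id: u32,
    fd: std::os::raw::c_int,
    offset: u32,
    stride: u32,
    modifier: u64,
    width: u32,
    height: u32,
    fourcc: u32,
) {
    // Registration: files the new dmabuf into the context's slot table under
    // import_id, returning NULL if all MAX_BUFFER_COUNT slots are taken.
    let dmabuf = dwl_context_dmabuf_new(
        ctx, import_id, fd, offset, stride, modifier, width, height, fourcc,
    );
    if dmabuf.is_null() {
        return;
    }
    // Presentation: the surface resolves import_id through its context
    // back-pointer, so no dwl_dmabuf pointer crosses back over the FFI.
    dwl_surface_flip_to(surface, import_id);
}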

View file

@@ -78,6 +78,7 @@ extern "C" {
extern "C" {
pub fn dwl_context_dmabuf_new(
self_: *mut dwl_context,
import_id: u32,
fd: ::std::os::raw::c_int,
offset: u32,
stride: u32,
@@ -93,7 +94,8 @@ extern "C" {
extern "C" {
pub fn dwl_context_surface_new(
self_: *mut dwl_context,
parent: *mut dwl_surface,
parent_id: u32,
surface_id: u32,
shm_fd: ::std::os::raw::c_int,
shm_size: usize,
buffer_size: usize,
@@ -115,7 +117,7 @@ extern "C" {
pub fn dwl_surface_flip(self_: *mut dwl_surface, buffer_index: usize);
}
extern "C" {
pub fn dwl_surface_flip_to(self_: *mut dwl_surface, dmabuf: *mut dwl_dmabuf);
pub fn dwl_surface_flip_to(self_: *mut dwl_surface, import_id: u32);
}
extern "C" {
pub fn dwl_surface_close_requested(self_: *const dwl_surface) -> bool;
@@ -123,3 +125,6 @@ extern "C" {
extern "C" {
pub fn dwl_surface_set_position(self_: *mut dwl_surface, x: u32, y: u32);
}
extern "C" {
pub fn dwl_surface_descriptor(self_: *const dwl_surface) -> *const ::std::ffi::c_void;
}

View file

@@ -2,16 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::collections::BTreeMap;
use std::num::NonZeroU32;
use crate::{DisplayT, EventDevice, GpuDisplayError, GpuDisplayFramebuffer};
use crate::{
DisplayT, GpuDisplayError, GpuDisplayFramebuffer, GpuDisplayResult, GpuDisplaySurface,
};
use base::{AsRawDescriptor, Event, RawDescriptor};
use data_model::VolatileSlice;
type SurfaceId = NonZeroU32;
#[allow(dead_code)]
struct Buffer {
width: u32,
@@ -38,21 +35,13 @@ }
}
}
struct Surface {
struct StubSurface {
width: u32,
height: u32,
buffer: Option<Buffer>,
}
impl Surface {
fn create(width: u32, height: u32) -> Surface {
Surface {
width,
height,
buffer: None,
}
}
impl StubSurface {
/// Gets the buffer at buffer_index, allocating it if necessary.
fn lazily_allocate_buffer(&mut self) -> Option<&mut Buffer> {
if self.buffer.is_none() {
@@ -70,8 +59,9 @@ impl Surface {
self.buffer.as_mut()
}
}
/// Gets the next framebuffer, allocating if necessary.
impl GpuDisplaySurface for StubSurface {
fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
let framebuffer = self.lazily_allocate_buffer()?;
let framebuffer_stride = framebuffer.stride() as u32;
@@ -82,145 +72,42 @@ impl Surface {
framebuffer_bytes_per_pixel,
))
}
fn flip(&mut self) {}
}
impl Drop for Surface {
impl Drop for StubSurface {
fn drop(&mut self) {}
}
struct SurfacesHelper {
next_surface_id: SurfaceId,
surfaces: BTreeMap<SurfaceId, Surface>,
}
impl SurfacesHelper {
fn new() -> SurfacesHelper {
SurfacesHelper {
next_surface_id: SurfaceId::new(1).unwrap(),
surfaces: Default::default(),
}
}
fn create_surface(&mut self, width: u32, height: u32) -> u32 {
let new_surface = Surface::create(width, height);
let new_surface_id = self.next_surface_id;
self.surfaces.insert(new_surface_id, new_surface);
self.next_surface_id = SurfaceId::new(self.next_surface_id.get() + 1).unwrap();
new_surface_id.get()
}
fn get_surface(&mut self, surface_id: u32) -> Option<&mut Surface> {
SurfaceId::new(surface_id).and_then(move |id| self.surfaces.get_mut(&id))
}
fn destroy_surface(&mut self, surface_id: u32) {
SurfaceId::new(surface_id).and_then(|id| self.surfaces.remove(&id));
}
fn flip_surface(&mut self, surface_id: u32) {
if let Some(surface) = self.get_surface(surface_id) {
surface.flip();
}
}
}
pub struct DisplayStub {
/// This event is never triggered and is used solely to fulfill AsRawDescriptor.
event: Event,
surfaces: SurfacesHelper,
}
impl DisplayStub {
pub fn new() -> Result<DisplayStub, GpuDisplayError> {
pub fn new() -> GpuDisplayResult<DisplayStub> {
let event = Event::new().map_err(|_| GpuDisplayError::CreateEvent)?;
Ok(DisplayStub {
event,
surfaces: SurfacesHelper::new(),
})
Ok(DisplayStub { event })
}
}
impl DisplayT for DisplayStub {
fn dispatch_events(&mut self) {}
fn create_surface(
&mut self,
parent_surface_id: Option<u32>,
_surface_id: u32,
width: u32,
height: u32,
) -> Result<u32, GpuDisplayError> {
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
if parent_surface_id.is_some() {
return Err(GpuDisplayError::Unsupported);
}
Ok(self.surfaces.create_surface(width, height))
}
fn release_surface(&mut self, surface_id: u32) {
self.surfaces.destroy_surface(surface_id);
}
fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer> {
self.surfaces
.get_surface(surface_id)
.and_then(|s| s.framebuffer())
}
fn next_buffer_in_use(&self, _surface_id: u32) -> bool {
false
}
fn flip(&mut self, surface_id: u32) {
self.surfaces.flip_surface(surface_id);
}
fn close_requested(&self, _surface_id: u32) -> bool {
false
}
fn import_dmabuf(
&mut self,
_fd: RawDescriptor,
_offset: u32,
_stride: u32,
_modifiers: u64,
_width: u32,
_height: u32,
_fourcc: u32,
) -> Result<u32, GpuDisplayError> {
Err(GpuDisplayError::Unsupported)
}
fn release_import(&mut self, _import_id: u32) {
// unsupported
}
fn commit(&mut self, _surface_id: u32) {
// unsupported
}
fn flip_to(&mut self, _surface_id: u32, _import_id: u32) {
// unsupported
}
fn set_position(&mut self, _surface_id: u32, _x: u32, _y: u32) {
// unsupported
}
fn import_event_device(&mut self, _event_device: EventDevice) -> Result<u32, GpuDisplayError> {
Err(GpuDisplayError::Unsupported)
}
fn release_event_device(&mut self, _event_device_id: u32) {
// unsupported
}
fn attach_event_device(&mut self, _surface_id: u32, _event_device_id: u32) {
// unsupported
Ok(Box::new(StubSurface {
width,
height,
buffer: None,
}))
}
}
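
The stub now doubles as the minimal example of the surface contract: framebuffer is the only method a GpuDisplaySurface implementation must provide, since every other trait method defaults to a no-op. A hypothetical in-crate surface could be as small as:

// Hypothetical do-nothing surface relying entirely on the trait defaults.
struct NullSurface;

impl GpuDisplaySurface for NullSurface {
    fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
        None // no backing memory; flip, commit, etc. fall back to no-ops
    }
}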

View file

@@ -12,13 +12,15 @@ mod dwl;
use dwl::*;
use crate::{DisplayT, EventDevice, GpuDisplayError, GpuDisplayFramebuffer, GpuDisplayResult};
use crate::{
DisplayT, GpuDisplayError, GpuDisplayFramebuffer, GpuDisplayImport, GpuDisplayResult,
GpuDisplaySurface,
};
use std::cell::Cell;
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::path::Path;
use std::ptr::{null, null_mut};
use std::ptr::null;
use base::{
round_up_to_page_size, AsRawDescriptor, MemoryMapping, MemoryMappingBuilder, RawDescriptor,
@@ -43,6 +45,9 @@ impl Drop for DwlContext {
}
struct DwlDmabuf(*mut dwl_dmabuf);
impl GpuDisplayImport for DwlDmabuf {}
impl Drop for DwlDmabuf {
fn drop(&mut self) {
if !self.0.is_null() {
@@ -68,7 +73,7 @@ impl Drop for DwlSurface {
}
}
struct Surface {
struct WaylandSurface {
surface: DwlSurface,
row_size: u32,
buffer_size: usize,
@@ -76,21 +81,81 @@ struct Surface {
buffer_mem: MemoryMapping,
}
impl Surface {
impl WaylandSurface {
fn surface(&self) -> *mut dwl_surface {
self.surface.0
}
}
impl GpuDisplaySurface for WaylandSurface {
fn surface_descriptor(&self) -> u64 {
// Safe if the surface is valid.
let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
pointer as u64
}
fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
let framebuffer = self
.buffer_mem
.get_slice(buffer_index * self.buffer_size, self.buffer_size)
.ok()?;
Some(GpuDisplayFramebuffer::new(
framebuffer,
self.row_size,
BYTES_PER_PIXEL,
))
}
fn next_buffer_in_use(&self) -> bool {
let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
// Safe because only a valid surface and buffer index is used.
unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
}
fn close_requested(&self) -> bool {
// Safe because only a valid surface is used.
unsafe { dwl_surface_close_requested(self.surface()) }
}
fn flip(&mut self) {
self.buffer_index
.set((self.buffer_index.get() + 1) % BUFFER_COUNT);
// Safe because only a valid surface and buffer index is used.
unsafe {
dwl_surface_flip(self.surface(), self.buffer_index.get());
}
}
fn flip_to(&mut self, import_id: u32) {
// Safe because only a valid surface and import_id is used.
unsafe { dwl_surface_flip_to(self.surface(), import_id) }
}
fn commit(&mut self) -> GpuDisplayResult<()> {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_commit(self.surface());
}
Ok(())
}
fn set_position(&mut self, x: u32, y: u32) {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_set_position(self.surface(), x, y);
}
}
}
/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
pub struct DisplayWl {
dmabufs: HashMap<u32, DwlDmabuf>,
dmabuf_next_id: u32,
surfaces: HashMap<u32, Surface>,
surface_next_id: u32,
ctx: DwlContext,
}
@@ -125,65 +190,16 @@ impl DisplayWl {
return Err(GpuDisplayError::Connect);
}
Ok(DisplayWl {
dmabufs: Default::default(),
dmabuf_next_id: 0,
surfaces: Default::default(),
surface_next_id: 0,
ctx,
})
Ok(DisplayWl { ctx })
}
fn ctx(&self) -> *mut dwl_context {
self.ctx.0
}
fn get_surface(&self, surface_id: u32) -> Option<&Surface> {
self.surfaces.get(&surface_id)
}
}
impl DisplayT for DisplayWl {
fn import_dmabuf(
&mut self,
fd: RawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> GpuDisplayResult<u32> {
// Safe given that the context pointer is valid. Any other invalid parameters would be
// rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
// before filing it away.
let dmabuf = DwlDmabuf(unsafe {
dwl_context_dmabuf_new(
self.ctx(),
fd,
offset,
stride,
modifiers,
width,
height,
fourcc,
)
});
if dmabuf.0.is_null() {
return Err(GpuDisplayError::FailedImport);
}
let next_id = self.dmabuf_next_id;
self.dmabufs.insert(next_id, dmabuf);
self.dmabuf_next_id += 1;
Ok(next_id)
}
fn release_import(&mut self, import_id: u32) {
self.dmabufs.remove(&import_id);
}
fn dispatch_events(&mut self) {
fn flush(&self) {
// Safe given that the context pointer is valid.
unsafe {
dwl_context_dispatch(self.ctx());
@@ -193,16 +209,12 @@ impl DisplayT for DisplayWl {
fn create_surface(
&mut self,
parent_surface_id: Option<u32>,
surface_id: u32,
width: u32,
height: u32,
) -> GpuDisplayResult<u32> {
let parent_ptr = match parent_surface_id {
Some(id) => match self.get_surface(id).map(|p| p.surface()) {
Some(ptr) => ptr,
None => return Err(GpuDisplayError::InvalidSurfaceId),
},
None => null_mut(),
};
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
let parent_id = parent_surface_id.unwrap_or(0);
let row_size = width * BYTES_PER_PIXEL;
let fb_size = row_size * height;
let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
@@ -212,12 +224,13 @@ impl DisplayT for DisplayWl {
.build()
.unwrap();
// Safe because only a valid context, parent pointer (if not None), and buffer FD are used.
// Safe because only a valid context, parent ID (if non-zero), and buffer FD are used.
// The returned surface is checked for validity before being filed away.
let surface = DwlSurface(unsafe {
dwl_context_surface_new(
self.ctx(),
parent_ptr,
parent_id,
surface_id,
buffer_shm.as_raw_descriptor(),
buffer_size,
fb_size as usize,
@@ -231,123 +244,48 @@ impl DisplayT for DisplayWl {
return Err(GpuDisplayError::CreateSurface);
}
let next_id = self.surface_next_id;
self.surfaces.insert(
next_id,
Surface {
Ok(Box::new(WaylandSurface {
surface,
row_size,
buffer_size: fb_size as usize,
buffer_index: Cell::new(0),
buffer_mem,
},
);
self.surface_next_id += 1;
Ok(next_id)
}))
}
fn release_surface(&mut self, surface_id: u32) {
self.surfaces.remove(&surface_id);
fn import_memory(
&mut self,
import_id: u32,
descriptor: &dyn AsRawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> GpuDisplayResult<Box<dyn GpuDisplayImport>> {
// Safe given that the context pointer is valid. Any other invalid parameters would be
// rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
// before filing it away.
let dmabuf = DwlDmabuf(unsafe {
dwl_context_dmabuf_new(
self.ctx(),
import_id,
descriptor.as_raw_descriptor(),
offset,
stride,
modifiers,
width,
height,
fourcc,
)
});
if dmabuf.0.is_null() {
return Err(GpuDisplayError::FailedImport);
}
fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer> {
let surface = self.get_surface(surface_id)?;
let buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
let framebuffer = surface
.buffer_mem
.get_slice(buffer_index * surface.buffer_size, surface.buffer_size)
.ok()?;
Some(GpuDisplayFramebuffer::new(
framebuffer,
surface.row_size,
BYTES_PER_PIXEL,
))
}
fn commit(&mut self, surface_id: u32) {
match self.get_surface(surface_id) {
Some(surface) => {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_commit(surface.surface());
}
}
None => debug_assert!(false, "invalid surface_id {}", surface_id),
}
}
fn next_buffer_in_use(&self, surface_id: u32) -> bool {
match self.get_surface(surface_id) {
Some(surface) => {
let next_buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
// Safe because only a valid surface and buffer index is used.
unsafe { dwl_surface_buffer_in_use(surface.surface(), next_buffer_index) }
}
None => {
debug_assert!(false, "invalid surface_id {}", surface_id);
false
}
}
}
fn flip(&mut self, surface_id: u32) {
match self.get_surface(surface_id) {
Some(surface) => {
surface
.buffer_index
.set((surface.buffer_index.get() + 1) % BUFFER_COUNT);
// Safe because only a valid surface and buffer index is used.
unsafe {
dwl_surface_flip(surface.surface(), surface.buffer_index.get());
}
}
None => debug_assert!(false, "invalid surface_id {}", surface_id),
}
}
fn flip_to(&mut self, surface_id: u32, import_id: u32) {
match self.get_surface(surface_id) {
Some(surface) => {
match self.dmabufs.get(&import_id) {
// Safe because only a valid surface and dmabuf is used.
Some(dmabuf) => unsafe { dwl_surface_flip_to(surface.surface(), dmabuf.0) },
None => debug_assert!(false, "invalid import_id {}", import_id),
}
}
None => debug_assert!(false, "invalid surface_id {}", surface_id),
}
}
fn close_requested(&self, surface_id: u32) -> bool {
match self.get_surface(surface_id) {
Some(surface) =>
// Safe because only a valid surface is used.
unsafe { dwl_surface_close_requested(surface.surface()) }
None => false,
}
}
fn set_position(&mut self, surface_id: u32, x: u32, y: u32) {
match self.get_surface(surface_id) {
Some(surface) => {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_set_position(surface.surface(), x, y);
}
}
None => debug_assert!(false, "invalid surface_id {}", surface_id),
}
}
fn import_event_device(&mut self, _event_device: EventDevice) -> GpuDisplayResult<u32> {
Err(GpuDisplayError::Unsupported)
}
fn release_event_device(&mut self, _event_device_id: u32) {
// unsupported
}
fn attach_event_device(&mut self, _surface_id: u32, _event_device_id: u32) {
// unsupported
Ok(Box::new(dmabuf))
}
}
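
As the struct's doc comment says, callers poll the compositor connection and call dispatch_events when it becomes readable. A sketch of that loop against the new public API, reusing the base primitives the crate itself uses (the Token wiring here is illustrative, not part of this change):

use base::{PollToken, WaitContext};
use gpu_display::{GpuDisplay, GpuDisplayResult};

#[derive(PollToken)]
enum Token {
    Display,
}

fn run(disp: &mut GpuDisplay) -> GpuDisplayResult<()> {
    let wait_ctx: WaitContext<Token> = WaitContext::new()?;
    wait_ctx.add(disp, Token::Display)?; // GpuDisplay implements AsRawDescriptor
    loop {
        // Block until the connection is readable, then let the common layer
        // drain and route the pending events. A real caller would also break
        // out when close_requested() reports true for its surface.
        for event in wait_ctx.wait()?.iter().filter(|e| e.is_readable) {
            match event.token {
                Token::Display => disp.dispatch_events()?,
            }
        }
    }
}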

View file

@@ -13,29 +13,25 @@ mod xlib;
use linux_input_sys::virtio_input_event;
use std::cmp::max;
use std::collections::BTreeMap;
use std::ffi::{c_void, CStr, CString};
use std::mem::{transmute_copy, zeroed};
use std::num::NonZeroU32;
use std::os::raw::c_ulong;
use std::ptr::{null, null_mut, NonNull};
use std::rc::Rc;
use std::time::Duration;
use libc::{shmat, shmctl, shmdt, shmget, IPC_CREAT, IPC_PRIVATE, IPC_RMID};
use crate::{
keycode_converter::KeycodeTranslator, keycode_converter::KeycodeTypes, DisplayT, EventDevice,
EventDeviceKind, GpuDisplayError, GpuDisplayFramebuffer, GpuDisplayResult,
keycode_converter::KeycodeTranslator, keycode_converter::KeycodeTypes, DisplayT,
EventDeviceKind, GpuDisplayError, GpuDisplayEvents, GpuDisplayFramebuffer, GpuDisplayResult,
GpuDisplaySurface,
};
use base::{error, AsRawDescriptor, EventType, PollToken, RawDescriptor, WaitContext};
use base::{AsRawDescriptor, RawDescriptor};
use data_model::VolatileSlice;
const BUFFER_COUNT: usize = 2;
type ObjectId = NonZeroU32;
/// A wrapper for XFree that takes any type.
unsafe fn x_free<T>(t: *mut T) {
xlib::XFree(t as *mut c_void);
@@ -54,10 +50,18 @@ impl Drop for XDisplay {
}
impl XDisplay {
/// Returns a pointer to the X display object.
fn as_ptr(&self) -> *mut xlib::Display {
self.0.as_ptr()
}
/// Sends any pending commands to the X server.
fn flush(&self) {
unsafe {
xlib::XFlush(self.as_ptr());
}
}
/// Returns true if the XShm extension is supported on this display.
fn supports_shm(&self) -> bool {
unsafe { xlib::XShmQueryExtension(self.as_ptr()) != 0 }
@@ -70,18 +74,6 @@ impl XDisplay {
})?))
}
/// Returns true if there are events on the queue.
fn pending_events(&self) -> bool {
unsafe { xlib::XPending(self.as_ptr()) != 0 }
}
/// Sends any pending commands to the X server.
fn flush(&self) {
unsafe {
xlib::XFlush(self.as_ptr());
}
}
/// Blocks until the next event from the display is received and returns that event.
///
/// Always flush before using this if any X commands were issued.
@@ -215,7 +207,7 @@ impl Buffer {
}
// Surfaces here are equivalent to XWindows.
struct Surface {
struct XSurface {
display: XDisplay,
visual: *mut xlib::Visual,
depth: u32,
@@ -223,8 +215,6 @@ struct Surface {
gc: xlib::GC,
width: u32,
height: u32,
event_devices: BTreeMap<ObjectId, EventDevice>,
keycode_translator: KeycodeTranslator,
// Fields for handling the buffer swap chain.
buffers: [Option<Buffer>; BUFFER_COUNT],
@@ -236,94 +226,7 @@ struct Surface {
close_requested: bool,
}
impl Surface {
fn create(
display: XDisplay,
screen: &XScreen,
visual: *mut xlib::Visual,
width: u32,
height: u32,
) -> Surface {
let keycode_translator = KeycodeTranslator::new(KeycodeTypes::XkbScancode);
unsafe {
let depth = xlib::XDefaultDepthOfScreen(screen.as_ptr()) as u32;
let black_pixel = xlib::XBlackPixelOfScreen(screen.as_ptr());
let window = xlib::XCreateSimpleWindow(
display.as_ptr(),
xlib::XRootWindowOfScreen(screen.as_ptr()),
0,
0,
width,
height,
1,
black_pixel,
black_pixel,
);
let gc = xlib::XCreateGC(display.as_ptr(), window, 0, null_mut());
// Because the event is from an extension, its type must be calculated dynamically.
let buffer_completion_type =
xlib::XShmGetEventBase(display.as_ptr()) as u32 + xlib::ShmCompletion;
// Mark this window as responding to close requests.
let mut delete_window_atom = xlib::XInternAtom(
display.as_ptr(),
CStr::from_bytes_with_nul(b"WM_DELETE_WINDOW\0")
.unwrap()
.as_ptr(),
0,
);
xlib::XSetWMProtocols(display.as_ptr(), window, &mut delete_window_atom, 1);
let size_hints = xlib::XAllocSizeHints();
(*size_hints).flags = (xlib::PMinSize | xlib::PMaxSize) as i64;
(*size_hints).max_width = width as i32;
(*size_hints).min_width = width as i32;
(*size_hints).max_height = height as i32;
(*size_hints).min_height = height as i32;
xlib::XSetWMNormalHints(display.as_ptr(), window, size_hints);
x_free(size_hints);
// We will redraw the buffer when we are exposed.
xlib::XSelectInput(
display.as_ptr(),
window,
(xlib::ExposureMask
| xlib::KeyPressMask
| xlib::KeyReleaseMask
| xlib::ButtonPressMask
| xlib::ButtonReleaseMask
| xlib::PointerMotionMask) as i64,
);
xlib::XClearWindow(display.as_ptr(), window);
xlib::XMapRaised(display.as_ptr(), window);
// Flush everything so that the window is visible immediately.
display.flush();
Surface {
display,
visual,
depth,
window,
gc,
width,
height,
event_devices: Default::default(),
keycode_translator,
buffers: Default::default(),
buffer_next: 0,
buffer_completion_type,
delete_window_atom,
close_requested: false,
}
}
}
impl XSurface {
/// Returns index of the current (on-screen) buffer, or 0 if there are no buffers.
fn current_buffer(&self) -> usize {
match self.buffer_next.checked_sub(1) {
@@ -332,77 +235,6 @@ impl Surface {
}
}
fn dispatch_to_event_devices(
&mut self,
events: &[virtio_input_event],
device_type: EventDeviceKind,
) {
for event_device in self.event_devices.values_mut() {
if event_device.kind() != device_type {
continue;
}
if let Err(e) = event_device.send_report(events.iter().cloned()) {
error!("error sending events to event device: {}", e);
}
}
}
fn handle_event(&mut self, ev: XEvent) {
match ev.as_enum(self.buffer_completion_type) {
XEventEnum::KeyEvent(key) => {
if let Some(linux_keycode) = self.keycode_translator.translate(key.keycode) {
let events = &[virtio_input_event::key(
linux_keycode,
key.type_ == xlib::KeyPress as i32,
)];
self.dispatch_to_event_devices(events, EventDeviceKind::Keyboard);
}
}
XEventEnum::ButtonEvent {
event: button_event,
pressed,
} => {
// We only support a single touch from button 1 (left mouse button).
if button_event.button & xlib::Button1 != 0 {
// The touch event *must* be first per the Linux input subsystem's guidance.
let events = &[
virtio_input_event::touch(pressed),
virtio_input_event::absolute_x(max(0, button_event.x)),
virtio_input_event::absolute_y(max(0, button_event.y)),
];
self.dispatch_to_event_devices(events, EventDeviceKind::Touchscreen);
}
}
XEventEnum::Motion(motion) => {
if motion.state & xlib::Button1Mask != 0 {
let events = &[
virtio_input_event::touch(true),
virtio_input_event::absolute_x(max(0, motion.x)),
virtio_input_event::absolute_y(max(0, motion.y)),
];
self.dispatch_to_event_devices(events, EventDeviceKind::Touchscreen);
}
}
XEventEnum::Expose => self.draw_buffer(self.current_buffer()),
XEventEnum::ClientMessage(xclient_data) => {
if xclient_data == self.delete_window_atom {
self.close_requested = true;
}
}
XEventEnum::ShmCompletionEvent(shmseg) => {
// Find the buffer associated with this event and mark it as not in use.
for buffer_opt in self.buffers.iter_mut() {
if let Some(buffer) = buffer_opt {
if buffer.segment_info.shmseg == shmseg {
buffer.in_use = false;
}
}
}
}
XEventEnum::Unhandled => {}
}
}
/// Draws the indicated buffer onto the screen.
fn draw_buffer(&mut self, buffer_index: usize) {
let buffer = match self.buffers.get_mut(buffer_index) {
@@ -493,8 +325,13 @@ impl Surface {
self.buffers[buffer_index].as_ref()
}
}
}
impl GpuDisplaySurface for XSurface {
fn surface_descriptor(&self) -> u64 {
self.window as u64
}
/// Gets the next framebuffer, allocating if necessary.
fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
// Framebuffers are lazily allocated. If the next buffer is not in self.buffers, add it
// using push_new_buffer and then get its memory.
@@ -507,7 +344,6 @@ impl Surface {
))
}
/// True if the next buffer is in use because of an XShmPutImage call.
fn next_buffer_in_use(&self) -> bool {
// Buffers that have not yet been made are not in use, hence unwrap_or(false).
self.buffers
@@ -516,15 +352,42 @@ impl Surface {
.unwrap_or(false)
}
/// Puts the next buffer onto the screen and sets the next buffer in the swap chain.
fn close_requested(&self) -> bool {
self.close_requested
}
fn flip(&mut self) {
let current_buffer_index = self.buffer_next;
self.buffer_next = (self.buffer_next + 1) % self.buffers.len();
self.draw_buffer(current_buffer_index);
}
fn buffer_completion_type(&self) -> u32 {
self.buffer_completion_type
}
fn draw_current_buffer(&mut self) {
self.draw_buffer(self.current_buffer())
}
fn on_client_message(&mut self, client_data: u64) {
if client_data == self.delete_window_atom {
self.close_requested = true;
}
}
fn on_shm_completion(&mut self, shm_complete: u64) {
for buffer_opt in self.buffers.iter_mut() {
if let Some(buffer) = buffer_opt {
if buffer.segment_info.shmseg == shm_complete {
buffer.in_use = false;
}
}
}
}
}
impl Drop for Surface {
impl Drop for XSurface {
fn drop(&mut self) {
// Safe given it should always be of the correct type.
unsafe {
@@ -534,32 +397,24 @@ impl Drop for Surface {
}
}
#[derive(PollToken)]
enum DisplayXPollToken {
Display,
EventDevice { event_device_id: u32 },
}
pub struct DisplayX {
wait_ctx: WaitContext<DisplayXPollToken>,
display: XDisplay,
screen: XScreen,
visual: *mut xlib::Visual,
next_id: ObjectId,
surfaces: BTreeMap<ObjectId, Surface>,
event_devices: BTreeMap<ObjectId, EventDevice>,
keycode_translator: KeycodeTranslator,
current_event: Option<XEvent>,
}
impl DisplayX {
pub fn open_display(display: Option<&str>) -> GpuDisplayResult<DisplayX> {
let wait_ctx = WaitContext::new()?;
let display_cstr = match display.map(CString::new) {
Some(Ok(s)) => Some(s),
Some(Err(_)) => return Err(GpuDisplayError::InvalidPath),
None => None,
};
let keycode_translator = KeycodeTranslator::new(KeycodeTypes::XkbScancode);
unsafe {
// Open the display
let display = match NonNull::new(xlib::XOpenDisplay(
@@ -572,8 +427,6 @@ impl DisplayX {
None => return Err(GpuDisplayError::Connect),
};
wait_ctx.add(&display, DisplayXPollToken::Display)?;
// Check for required extension.
if !display.supports_shm() {
return Err(GpuDisplayError::RequiredFeature("xshm extension"));
@@ -615,224 +468,192 @@ impl DisplayX {
x_free(visual_info);
Ok(DisplayX {
wait_ctx,
display,
screen,
visual,
next_id: ObjectId::new(1).unwrap(),
surfaces: Default::default(),
event_devices: Default::default(),
keycode_translator,
current_event: None,
})
}
}
fn surface_ref(&self, surface_id: u32) -> Option<&Surface> {
ObjectId::new(surface_id).and_then(move |id| self.surfaces.get(&id))
}
fn surface_mut(&mut self, surface_id: u32) -> Option<&mut Surface> {
ObjectId::new(surface_id).and_then(move |id| self.surfaces.get_mut(&id))
}
fn event_device(&self, event_device_id: u32) -> Option<&EventDevice> {
ObjectId::new(event_device_id).and_then(move |id| self.event_devices.get(&id))
}
fn event_device_mut(&mut self, event_device_id: u32) -> Option<&mut EventDevice> {
ObjectId::new(event_device_id).and_then(move |id| self.event_devices.get_mut(&id))
}
fn handle_event(&mut self, ev: XEvent) {
let window = ev.window();
for surface in self.surfaces.values_mut() {
if surface.window != window {
continue;
}
surface.handle_event(ev);
return;
}
}
fn dispatch_display_events(&mut self) {
loop {
self.display.flush();
if !self.display.pending_events() {
break;
}
let ev = self.display.next_event();
self.handle_event(ev);
}
}
fn handle_event_device(&mut self, event_device_id: u32) {
if let Some(event_device) = self.event_device(event_device_id) {
// TODO(zachr): decode the event and forward to the device.
let _ = event_device.recv_event_encoded();
}
}
fn handle_poll_ctx(&mut self) -> base::Result<()> {
let wait_events = self.wait_ctx.wait_timeout(Duration::default())?;
for wait_event in wait_events.iter().filter(|e| e.is_writable) {
if let DisplayXPollToken::EventDevice { event_device_id } = wait_event.token {
if let Some(event_device) = self.event_device_mut(event_device_id) {
if !event_device.flush_buffered_events()? {
continue;
}
}
// Although this looks exactly like the previous if-block, we need to reborrow self
// as immutable in order to make use of self.wait_ctx.
if let Some(event_device) = self.event_device(event_device_id) {
self.wait_ctx.modify(
event_device,
EventType::Read,
DisplayXPollToken::EventDevice { event_device_id },
)?;
}
}
}
for wait_event in wait_events.iter().filter(|e| e.is_readable) {
match wait_event.token {
DisplayXPollToken::Display => self.dispatch_display_events(),
DisplayXPollToken::EventDevice { event_device_id } => {
self.handle_event_device(event_device_id)
}
}
}
Ok(())
}
}
impl DisplayT for DisplayX {
fn dispatch_events(&mut self) {
if let Err(e) = self.handle_poll_ctx() {
error!("failed to dispatch events: {}", e);
fn pending_events(&self) -> bool {
unsafe { xlib::XPending(self.display.as_ptr()) != 0 }
}
fn flush(&self) {
self.display.flush();
}
fn next_event(&mut self) -> GpuDisplayResult<u64> {
let ev = self.display.next_event();
let descriptor = ev.window() as u64;
self.current_event = Some(ev);
Ok(descriptor)
}
fn handle_next_event(
&mut self,
surface: &mut Box<dyn GpuDisplaySurface>,
) -> Option<GpuDisplayEvents> {
// Should not panic since the common layer only calls this when an event exists.
let ev = self.current_event.take().unwrap();
match ev.as_enum(surface.buffer_completion_type()) {
XEventEnum::KeyEvent(key) => {
if let Some(linux_keycode) = self.keycode_translator.translate(key.keycode) {
let events = vec![virtio_input_event::key(
linux_keycode,
key.type_ == xlib::KeyPress as i32,
)];
return Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Keyboard,
});
}
}
XEventEnum::ButtonEvent {
event: button_event,
pressed,
} => {
// We only support a single touch from button 1 (left mouse button).
if button_event.button & xlib::Button1 != 0 {
// The touch event *must* be first per the Linux input subsystem's guidance.
let events = vec![
virtio_input_event::touch(pressed),
virtio_input_event::absolute_x(max(0, button_event.x)),
virtio_input_event::absolute_y(max(0, button_event.y)),
];
return Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Touchscreen,
});
}
}
XEventEnum::Motion(motion) => {
if motion.state & xlib::Button1Mask != 0 {
let events = vec![
virtio_input_event::touch(true),
virtio_input_event::absolute_x(max(0, motion.x)),
virtio_input_event::absolute_y(max(0, motion.y)),
];
return Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Touchscreen,
});
}
}
XEventEnum::Expose => surface.draw_current_buffer(),
XEventEnum::ClientMessage(xclient_data) => {
surface.on_client_message(xclient_data);
return None;
}
XEventEnum::ShmCompletionEvent(shmseg) => {
surface.on_shm_completion(shmseg);
return None;
}
XEventEnum::Unhandled => return None,
}
None
}
fn create_surface(
&mut self,
parent_surface_id: Option<u32>,
_surface_id: u32,
width: u32,
height: u32,
) -> GpuDisplayResult<u32> {
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
if parent_surface_id.is_some() {
return Err(GpuDisplayError::Unsupported);
}
let new_surface = Surface::create(
self.display.clone(),
&self.screen,
self.visual,
unsafe {
let depth = xlib::XDefaultDepthOfScreen(self.screen.as_ptr()) as u32;
let black_pixel = xlib::XBlackPixelOfScreen(self.screen.as_ptr());
let window = xlib::XCreateSimpleWindow(
self.display.as_ptr(),
xlib::XRootWindowOfScreen(self.screen.as_ptr()),
0,
0,
width,
height,
1,
black_pixel,
black_pixel,
);
let new_surface_id = self.next_id;
self.surfaces.insert(new_surface_id, new_surface);
self.next_id = ObjectId::new(self.next_id.get() + 1).unwrap();
Ok(new_surface_id.get())
}
let gc = xlib::XCreateGC(self.display.as_ptr(), window, 0, null_mut());
fn release_surface(&mut self, surface_id: u32) {
if let Some(mut surface) =
ObjectId::new(surface_id).and_then(|id| self.surfaces.remove(&id))
{
self.event_devices.append(&mut surface.event_devices);
}
}
// Because the event is from an extension, its type must be calculated dynamically.
let buffer_completion_type =
xlib::XShmGetEventBase(self.display.as_ptr()) as u32 + xlib::ShmCompletion;
fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer> {
self.surface_mut(surface_id).and_then(|s| s.framebuffer())
}
// Mark this window as responding to close requests.
let mut delete_window_atom = xlib::XInternAtom(
self.display.as_ptr(),
CStr::from_bytes_with_nul(b"WM_DELETE_WINDOW\0")
.unwrap()
.as_ptr(),
0,
);
xlib::XSetWMProtocols(self.display.as_ptr(), window, &mut delete_window_atom, 1);
fn next_buffer_in_use(&self, surface_id: u32) -> bool {
self.surface_ref(surface_id)
.map(|s| s.next_buffer_in_use())
.unwrap_or(false)
}
let size_hints = xlib::XAllocSizeHints();
(*size_hints).flags = (xlib::PMinSize | xlib::PMaxSize) as i64;
(*size_hints).max_width = width as i32;
(*size_hints).min_width = width as i32;
(*size_hints).max_height = height as i32;
(*size_hints).min_height = height as i32;
xlib::XSetWMNormalHints(self.display.as_ptr(), window, size_hints);
x_free(size_hints);
fn flip(&mut self, surface_id: u32) {
if let Some(surface) = self.surface_mut(surface_id) {
surface.flip()
}
}
// We will redraw the buffer when we are exposed.
xlib::XSelectInput(
self.display.as_ptr(),
window,
(xlib::ExposureMask
| xlib::KeyPressMask
| xlib::KeyReleaseMask
| xlib::ButtonPressMask
| xlib::ButtonReleaseMask
| xlib::PointerMotionMask) as i64,
);
fn close_requested(&self, surface_id: u32) -> bool {
self.surface_ref(surface_id)
.map(|s| s.close_requested)
.unwrap_or(true)
}
xlib::XClearWindow(self.display.as_ptr(), window);
xlib::XMapRaised(self.display.as_ptr(), window);
#[allow(unused_variables)]
fn import_dmabuf(
&mut self,
fd: RawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> Result<u32, GpuDisplayError> {
Err(GpuDisplayError::Unsupported)
}
#[allow(unused_variables)]
fn release_import(&mut self, import_id: u32) {
// unsupported
}
#[allow(unused_variables)]
fn commit(&mut self, surface_id: u32) {
// unsupported
}
#[allow(unused_variables)]
fn flip_to(&mut self, surface_id: u32, import_id: u32) {
// unsupported
}
#[allow(unused_variables)]
fn set_position(&mut self, surface_id: u32, x: u32, y: u32) {
// unsupported
}
// Flush everything so that the window is visible immediately.
self.display.flush();
fn import_event_device(&mut self, event_device: EventDevice) -> GpuDisplayResult<u32> {
let new_event_device_id = self.next_id;
self.wait_ctx.add(
&event_device,
DisplayXPollToken::EventDevice {
event_device_id: new_event_device_id.get(),
},
)?;
self.event_devices.insert(new_event_device_id, event_device);
self.next_id = ObjectId::new(self.next_id.get() + 1).unwrap();
Ok(new_event_device_id.get())
Ok(Box::new(XSurface {
display: self.display.clone(),
visual: self.visual,
depth,
window,
gc,
width,
height,
buffers: Default::default(),
buffer_next: 0,
buffer_completion_type,
delete_window_atom,
close_requested: false,
}))
}
fn release_event_device(&mut self, event_device_id: u32) {
ObjectId::new(event_device_id).and_then(|id| self.event_devices.remove(&id));
}
fn attach_event_device(&mut self, surface_id: u32, event_device_id: u32) {
let event_device_id = match ObjectId::new(event_device_id) {
Some(id) => id,
None => return,
};
let surface_id = match ObjectId::new(surface_id) {
Some(id) => id,
None => return,
};
let surface = self.surfaces.get_mut(&surface_id).unwrap();
let event_device = self.event_devices.remove(&event_device_id).unwrap();
surface.event_devices.insert(event_device_id, event_device);
}
}
impl AsRawDescriptor for DisplayX {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.wait_ctx.as_raw_descriptor()
self.display.as_raw_descriptor()
}
}
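
The upshot for X11: the backend no longer owns a surface map or a poll context. The common layer matches each queued event's window (surface_descriptor, the XID widened to u64) against its surfaces and hands the backend a single surface to decode against, roughly as below (condensed from dispatch_display_events in the lib.rs hunks that follow):

use std::collections::BTreeMap;

// Condensed sketch; `inner` is the backend and `surfaces` is the id map
// owned by GpuDisplay.
fn route_events(
    inner: &mut dyn DisplayT,
    surfaces: &mut BTreeMap<u32, Box<dyn GpuDisplaySurface>>,
) -> GpuDisplayResult<()> {
    inner.flush();
    while inner.pending_events() {
        let descriptor = inner.next_event()?; // window XID of the queued event
        for surface in surfaces.values_mut() {
            if surface.surface_descriptor() != descriptor {
                continue;
            }
            // The backend translates the X event into virtio input events,
            // which the common layer forwards to matching event devices.
            let _events = inner.handle_next_event(surface);
        }
    }
    Ok(())
}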

View file

@@ -2,12 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Crate for displaying simple surfaces and GPU buffers over wayland.
//! Crate for displaying simple surfaces and GPU buffers over a low-level display backend such as
//! Wayland or X.
use std::collections::BTreeMap;
use std::fmt::{self, Display};
use std::io::Error as IoError;
use std::path::Path;
use std::time::Duration;
use base::{AsRawDescriptor, Error as BaseError, RawDescriptor};
use base::{AsRawDescriptor, Error as BaseError, EventType, PollToken, RawDescriptor, WaitContext};
use data_model::VolatileSlice;
mod event_device;
@@ -19,6 +23,7 @@ mod gpu_display_x;
mod keycode_converter;
pub use event_device::{EventDevice, EventDeviceKind};
use linux_input_sys::virtio_input_event;
/// An error generated by `GpuDisplay`.
#[derive(Debug)]
@@ -35,12 +40,16 @@ pub enum GpuDisplayError {
CreateSurface,
/// Failed to import a buffer to the compositor.
FailedImport,
/// The surface ID is invalid.
InvalidSurfaceId,
/// A required feature was missing.
RequiredFeature(&'static str),
/// The path is invalid.
InvalidPath,
/// The import ID is invalid.
InvalidImportId,
/// The surface ID is invalid.
InvalidSurfaceId,
/// An input/output error occurred.
IoError(IoError),
/// A required feature was missing.
RequiredFeature(&'static str),
/// The method is unsupported by the implementation.
Unsupported,
}
@@ -59,7 +68,9 @@ impl Display for GpuDisplayError {
CreateSurface => write!(f, "failed to create surface on the compositor"),
FailedImport => write!(f, "failed to import a buffer to the compositor"),
InvalidPath => write!(f, "invalid path"),
InvalidImportId => write!(f, "invalid import ID"),
InvalidSurfaceId => write!(f, "invalid surface ID"),
IoError(e) => write!(f, "an input/output error occurred: {}", e),
RequiredFeature(feature) => write!(f, "required feature was missing: {}", feature),
Unsupported => write!(f, "unsupported by the implementation"),
}
@@ -72,6 +83,19 @@ impl From<BaseError> for GpuDisplayError {
}
}
impl From<IoError> for GpuDisplayError {
fn from(e: IoError) -> GpuDisplayError {
GpuDisplayError::IoError(e)
}
}
/// Poll token for display instances
#[derive(PollToken)]
pub enum DisplayPollToken {
Display,
EventDevice { event_device_id: u32 },
}
#[derive(Clone)]
pub struct GpuDisplayFramebuffer<'a> {
framebuffer: VolatileSlice<'a>,
@@ -127,47 +151,124 @@ }
}
}
/// Empty trait, just used as a bound for now.
trait GpuDisplayImport {}
trait GpuDisplaySurface {
/// Returns a unique ID associated with the surface. This is typically generated by the
/// compositor or cast from a raw pointer.
fn surface_descriptor(&self) -> u64 {
0
}
/// Returns the next framebuffer, allocating if necessary.
fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>;
/// Returns true if the next buffer in the swapchain is already in use.
fn next_buffer_in_use(&self) -> bool {
false
}
/// Returns true if the surface should be closed.
fn close_requested(&self) -> bool {
false
}
/// Puts the next buffer on the screen, making it the current buffer.
fn flip(&mut self) {
// no-op
}
/// Puts the specified import_id on the screen.
fn flip_to(&mut self, _import_id: u32) {
// no-op
}
/// Commits the surface to the compositor.
fn commit(&mut self) -> GpuDisplayResult<()> {
Ok(())
}
/// Sets the position of the identified subsurface relative to its parent.
fn set_position(&mut self, _x: u32, _y: u32) {
// no-op
}
/// Returns the event type used to signal buffer completion.
fn buffer_completion_type(&self) -> u32 {
0
}
/// Draws the current buffer on the screen.
fn draw_current_buffer(&mut self) {
// no-op
}
/// Handles a compositor-specific client event.
fn on_client_message(&mut self, _client_data: u64) {
// no-op
}
/// Handles a compositor-specific shared memory completion event.
fn on_shm_completion(&mut self, _shm_complete: u64) {
// no-op
}
}
struct GpuDisplayEvents {
events: Vec<virtio_input_event>,
device_type: EventDeviceKind,
}
trait DisplayT: AsRawDescriptor {
fn import_dmabuf(
/// Returns true if there are events on the queue.
fn pending_events(&self) -> bool {
false
}
/// Sends any pending commands to the compositor.
fn flush(&self) {
// no-op
}
/// Returns the surface descriptor associated with the current event.
fn next_event(&mut self) -> GpuDisplayResult<u64> {
Ok(0)
}
/// Handles the event from the compositor and returns a list of events.
fn handle_next_event(
&mut self,
fd: RawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> Result<u32, GpuDisplayError>;
fn release_import(&mut self, import_id: u32);
fn dispatch_events(&mut self);
_surface: &mut Box<dyn GpuDisplaySurface>,
) -> Option<GpuDisplayEvents> {
None
}
/// Creates a surface with the given parameters. The display backend is given a non-zero
/// `surface_id` as a handle for subsequent operations.
fn create_surface(
&mut self,
parent_surface_id: Option<u32>,
width: u32,
height: u32,
) -> Result<u32, GpuDisplayError>;
fn release_surface(&mut self, surface_id: u32);
fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer>;
fn framebuffer_region(
&mut self,
surface_id: u32,
x: u32,
y: u32,
width: u32,
height: u32,
) -> Option<GpuDisplayFramebuffer> {
let framebuffer = self.framebuffer(surface_id)?;
framebuffer.sub_region(x, y, width, height)
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>>;
/// Imports memory into the display backend. The display backend is given a non-zero
/// `import_id` as a handle for subsequent operations.
fn import_memory(
&mut self,
_import_id: u32,
_descriptor: &dyn AsRawDescriptor,
_offset: u32,
_stride: u32,
_modifiers: u64,
_width: u32,
_height: u32,
_fourcc: u32,
) -> GpuDisplayResult<Box<dyn GpuDisplayImport>> {
Err(GpuDisplayError::Unsupported)
}
fn commit(&mut self, surface_id: u32);
fn next_buffer_in_use(&self, surface_id: u32) -> bool;
fn flip(&mut self, surface_id: u32);
fn flip_to(&mut self, surface_id: u32, import_id: u32);
fn close_requested(&self, surface_id: u32) -> bool;
fn set_position(&mut self, surface_id: u32, x: u32, y: u32);
fn import_event_device(&mut self, event_device: EventDevice) -> Result<u32, GpuDisplayError>;
fn release_event_device(&mut self, event_device_id: u32);
fn attach_event_device(&mut self, surface_id: u32, event_device_id: u32);
}
/// A connection to the compositor and associated collection of state.
@@ -176,10 +277,16 @@ trait DisplayT: AsRawDescriptor {
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
pub struct GpuDisplay {
inner: Box<dyn DisplayT>,
next_id: u32,
event_devices: BTreeMap<u32, EventDevice>,
surfaces: BTreeMap<u32, Box<dyn GpuDisplaySurface>>,
imports: BTreeMap<u32, Box<dyn GpuDisplayImport>>,
wait_ctx: WaitContext<DisplayPollToken>,
is_x: bool,
}
impl GpuDisplay {
/// Opens a connection to X server
pub fn open_x<S: AsRef<str>>(display_name: Option<S>) -> GpuDisplayResult<GpuDisplay> {
let _ = display_name;
#[cfg(feature = "x")]
@@ -188,8 +295,19 @@ impl GpuDisplay {
Some(s) => gpu_display_x::DisplayX::open_display(Some(s.as_ref()))?,
None => gpu_display_x::DisplayX::open_display(None)?,
};
let inner = Box::new(display);
Ok(GpuDisplay { inner, is_x: true })
let wait_ctx = WaitContext::new()?;
wait_ctx.add(&display, DisplayPollToken::Display)?;
Ok(GpuDisplay {
inner: Box::new(display),
next_id: 1,
event_devices: Default::default(),
surfaces: Default::default(),
imports: Default::default(),
wait_ctx,
is_x: true,
})
}
#[cfg(not(feature = "x"))]
Err(GpuDisplayError::Unsupported)
@@ -201,14 +319,35 @@ impl GpuDisplay {
Some(s) => gpu_display_wl::DisplayWl::new(Some(s.as_ref()))?,
None => gpu_display_wl::DisplayWl::new(None)?,
};
let inner = Box::new(display);
Ok(GpuDisplay { inner, is_x: false })
let wait_ctx = WaitContext::new()?;
wait_ctx.add(&display, DisplayPollToken::Display)?;
Ok(GpuDisplay {
inner: Box::new(display),
next_id: 1,
event_devices: Default::default(),
surfaces: Default::default(),
imports: Default::default(),
wait_ctx,
is_x: false,
})
}
pub fn open_stub() -> GpuDisplayResult<GpuDisplay> {
let display = gpu_display_stub::DisplayStub::new()?;
let inner = Box::new(display);
Ok(GpuDisplay { inner, is_x: false })
let wait_ctx = WaitContext::new()?;
wait_ctx.add(&display, DisplayPollToken::Display)?;
Ok(GpuDisplay {
inner: Box::new(display),
next_id: 1,
event_devices: Default::default(),
surfaces: Default::default(),
imports: Default::default(),
wait_ctx,
is_x: false,
})
}
/// Return whether this display is an X display
@@ -216,30 +355,67 @@ impl GpuDisplay {
self.is_x
}
/// Imports a dmabuf to the compositor for use as a surface buffer and returns a handle to it.
pub fn import_dmabuf(
&mut self,
fd: RawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> GpuDisplayResult<u32> {
self.inner
.import_dmabuf(fd, offset, stride, modifiers, width, height, fourcc)
fn handle_event_device(&mut self, event_device_id: u32) {
if let Some(event_device) = self.event_devices.get(&event_device_id) {
// TODO(zachr): decode the event and forward to the device.
let _ = event_device.recv_event_encoded();
}
}
/// Releases a previously imported dmabuf identified by the given handle.
pub fn release_import(&mut self, import_id: u32) {
self.inner.release_import(import_id);
fn dispatch_display_events(&mut self) -> GpuDisplayResult<()> {
self.inner.flush();
while self.inner.pending_events() {
let surface_descriptor = self.inner.next_event()?;
for surface in self.surfaces.values_mut() {
if surface_descriptor != surface.surface_descriptor() {
continue;
}
if let Some(gpu_display_events) = self.inner.handle_next_event(surface) {
for event_device in self.event_devices.values_mut() {
if event_device.kind() != gpu_display_events.device_type {
continue;
}
event_device.send_report(gpu_display_events.events.iter().cloned())?;
}
}
}
}
Ok(())
}
/// Dispatches internal events that were received from the compositor since the last call to
/// `dispatch_events`.
pub fn dispatch_events(&mut self) {
self.inner.dispatch_events()
pub fn dispatch_events(&mut self) -> GpuDisplayResult<()> {
let wait_events = self.wait_ctx.wait_timeout(Duration::default())?;
for wait_event in wait_events.iter().filter(|e| e.is_writable) {
if let DisplayPollToken::EventDevice { event_device_id } = wait_event.token {
if let Some(event_device) = self.event_devices.get_mut(&event_device_id) {
if !event_device.flush_buffered_events()? {
continue;
}
self.wait_ctx.modify(
event_device,
EventType::Read,
DisplayPollToken::EventDevice { event_device_id },
)?;
}
}
}
for wait_event in wait_events.iter().filter(|e| e.is_readable) {
match wait_event.token {
DisplayPollToken::Display => self.dispatch_display_events()?,
DisplayPollToken::EventDevice { event_device_id } => {
self.handle_event_device(event_device_id)
}
}
}
Ok(())
}
/// Creates a surface on the compositor as either a top level window, or child of another
@@ -250,17 +426,31 @@ impl GpuDisplay {
width: u32,
height: u32,
) -> GpuDisplayResult<u32> {
self.inner.create_surface(parent_surface_id, width, height)
if let Some(parent_id) = parent_surface_id {
if !self.surfaces.contains_key(&parent_id) {
return Err(GpuDisplayError::InvalidSurfaceId);
}
}
let new_surface_id = self.next_id;
let new_surface =
self.inner
.create_surface(parent_surface_id, new_surface_id, width, height)?;
self.next_id += 1;
self.surfaces.insert(new_surface_id, new_surface);
Ok(new_surface_id)
}
/// Releases a previously created surface identified by the given handle.
pub fn release_surface(&mut self, surface_id: u32) {
self.inner.release_surface(surface_id)
self.surfaces.remove(&surface_id);
}
/// Gets a reference to an unused framebuffer for the identified surface.
pub fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer> {
self.inner.framebuffer(surface_id)
let surface = self.surfaces.get_mut(&surface_id)?;
surface.framebuffer()
}
/// Gets a reference to an unused framebuffer for the identified surface.
@@ -272,13 +462,8 @@ impl GpuDisplay {
width: u32,
height: u32,
) -> Option<GpuDisplayFramebuffer> {
self.inner
.framebuffer_region(surface_id, x, y, width, height)
}
/// Commits any pending state for the identified surface.
pub fn commit(&mut self, surface_id: u32) {
self.inner.commit(surface_id)
let framebuffer = self.framebuffer(surface_id)?;
framebuffer.sub_region(x, y, width, height)
}
/// Returns true if the next buffer in the buffer queue for the given surface is currently in
@@ -287,52 +472,121 @@ impl GpuDisplay {
/// If the next buffer is in use, the memory returned from `framebuffer_memory` should not be
/// written to.
pub fn next_buffer_in_use(&self, surface_id: u32) -> bool {
self.inner.next_buffer_in_use(surface_id)
self.surfaces
.get(&surface_id)
.map(|s| s.next_buffer_in_use())
.unwrap_or(false)
}
/// Changes the visible contents of the identified surface to the contents of the framebuffer
/// last returned by `framebuffer_memory` for this surface.
pub fn flip(&mut self, surface_id: u32) {
self.inner.flip(surface_id)
if let Some(surface) = self.surfaces.get_mut(&surface_id) {
surface.flip()
}
/// Changes the visible contents of the identified surface to that of the identified imported
/// buffer.
pub fn flip_to(&mut self, surface_id: u32, import_id: u32) {
self.inner.flip_to(surface_id, import_id)
}
/// Returns true if the identified top level surface has been told to close by the compositor,
/// and by extension the user.
pub fn close_requested(&self, surface_id: u32) -> bool {
self.inner.close_requested(surface_id)
self.surfaces
.get(&surface_id)
.map(|s| s.close_requested())
.unwrap_or(true)
}
/// Imports the given `event_device` into the display, returning an event device id on success.
/// This device may be used to poll for input events.
pub fn import_event_device(&mut self, event_device: EventDevice) -> GpuDisplayResult<u32> {
let new_event_device_id = self.next_id;
self.wait_ctx.add(
&event_device,
DisplayPollToken::EventDevice {
event_device_id: new_event_device_id,
},
)?;
self.event_devices.insert(new_event_device_id, event_device);
self.next_id += 1;
Ok(new_event_device_id)
}
/// Release an event device from the display, given an `event_device_id`.
pub fn release_event_device(&mut self, event_device_id: u32) {
self.event_devices.remove(&event_device_id);
}
/// Imports memory to the compositor for use as a surface buffer and returns a handle
/// to it.
pub fn import_memory(
&mut self,
descriptor: &dyn AsRawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> GpuDisplayResult<u32> {
let import_id = self.next_id;
let gpu_display_memory = self.inner.import_memory(
import_id, descriptor, offset, stride, modifiers, width, height, fourcc,
)?;
self.next_id += 1;
self.imports.insert(import_id, gpu_display_memory);
Ok(import_id)
}
/// Releases a previously imported memory identified by the given handle.
pub fn release_import(&mut self, import_id: u32) {
self.imports.remove(&import_id);
}
/// Commits any pending state for the identified surface.
pub fn commit(&mut self, surface_id: u32) -> GpuDisplayResult<()> {
let surface = self
.surfaces
.get_mut(&surface_id)
.ok_or(GpuDisplayError::InvalidSurfaceId)?;
surface.commit()
}
/// Changes the visible contents of the identified surface to that of the identified imported
/// buffer.
pub fn flip_to(&mut self, surface_id: u32, import_id: u32) -> GpuDisplayResult<()> {
let surface = self
.surfaces
.get_mut(&surface_id)
.ok_or(GpuDisplayError::InvalidSurfaceId)?;
if !self.imports.contains_key(&import_id) {
return Err(GpuDisplayError::InvalidImportId);
}
surface.flip_to(import_id);
Ok(())
}
/// Sets the position of the identified subsurface relative to its parent.
///
/// The change in position will not be visible until `commit` is called for the parent surface.
pub fn set_position(&mut self, surface_id: u32, x: u32, y: u32) {
self.inner.set_position(surface_id, x, y)
}
pub fn set_position(&mut self, surface_id: u32, x: u32, y: u32) -> GpuDisplayResult<()> {
let surface = self
.surfaces
.get_mut(&surface_id)
.ok_or(GpuDisplayError::InvalidSurfaceId)?;
pub fn import_event_device(
&mut self,
event_device: EventDevice,
) -> Result<u32, GpuDisplayError> {
self.inner.import_event_device(event_device)
}
pub fn release_event_device(&mut self, event_device_id: u32) {
self.inner.release_event_device(event_device_id)
}
pub fn attach_event_device(&mut self, surface_id: u32, event_device_id: u32) {
self.inner.attach_event_device(surface_id, event_device_id);
surface.set_position(x, y);
Ok(())
}
}
impl AsRawDescriptor for GpuDisplay {
fn as_raw_descriptor(&self) -> RawDescriptor {
self.inner.as_raw_descriptor()
self.wait_ctx.as_raw_descriptor()
}
}
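
End to end, the renamed import path now looks like this from a caller's perspective (a sketch; the descriptor argument and all buffer layout values are placeholders for a real dmabuf):

use base::AsRawDescriptor;
use gpu_display::{GpuDisplay, GpuDisplayResult};

fn show_dmabuf(
    disp: &mut GpuDisplay,
    surface_id: u32,
    dmabuf: &dyn AsRawDescriptor, // previously passed as a raw fd
) -> GpuDisplayResult<()> {
    let import_id = disp.import_memory(
        dmabuf,
        0,          // offset
        4096,       // stride (placeholder)
        0,          // modifier
        1024,       // width (placeholder)
        768,        // height (placeholder)
        0x34325258, // fourcc 'XR24' (DRM_FORMAT_XRGB8888)
    )?;
    disp.flip_to(surface_id, import_id)?; // now fallible: unknown ids are errors
    disp.commit(surface_id)?;
    Ok(())
}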