win_audio: Upstream Windows audio backend code
The most notable changes are:
* Playback Async support
* Audio Client flags to hide app from SndVol when audio session has expired
* Updates to audio_streams and cros_async crates for playback async support

BUG=b:256655413
TEST=Verified to work downstream

Change-Id: Ifbe9a15791feaa41d6e1d5eaf2c5824b2c7c25b8
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3994882
Commit-Queue: Richard Zhang <rizhang@google.com>
Reviewed-by: Noah Gold <nkgold@google.com>
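A rough sketch of how the new async playback path is consumed, modeled on the `test_async` test added in win_audio/src/win_audio_impl/async_stream.rs below; the cros_async Executor, the exact import paths, and the 48 kHz / 480-frame parameters are illustrative assumptions, not part of this change:

    use audio_streams::{BoxError, SampleFormat, StreamSourceGenerator};
    use cros_async::Executor;
    use win_audio::WinAudioStreamSourceGenerator; // re-exported path assumed

    async fn play_one_period(ex: &Executor) -> Result<(), BoxError> {
        // Build a WASAPI-backed stream source, then ask for an async playback stream:
        // 2 channels, signed 16-bit samples, 48 kHz, 480-frame guest period.
        let mut source = WinAudioStreamSourceGenerator {}.generate()?;
        let (_control, mut stream) =
            source.new_async_playback_stream(2, SampleFormat::S16LE, 48000, 480, ex)?;

        // Await WASAPI's "ready to read" event, fill one period of samples, commit it.
        let mut buffer = stream.next_playback_buffer(ex).await?;
        let samples = [0u8; 480 * 2 * 2]; // period * channels * bytes per sample
        buffer
            .copy_cb(samples.len(), |out| out.copy_from_slice(&samples))
            .expect("failed to copy samples");
        buffer.commit().await;
        Ok(())
    }

    // Typically driven by the executor, e.g. ex.run_until(play_one_period(&ex)).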
This commit is contained in:
parent 8d69b30c14
commit 24ed80fd99
8 changed files with 568 additions and 75 deletions
Cargo.lock (generated): 2 changes

@@ -2191,8 +2191,10 @@ name = "win_audio"
version = "0.1.0"
dependencies = [
 "anyhow",
 "async-trait",
 "audio_streams",
 "base",
 "cros_async",
 "libc",
 "metrics",
 "once_cell",
@@ -15,6 +15,8 @@
use std::io::Result;
#[cfg(unix)]
use std::os::unix::net::UnixStream;
#[cfg(windows)]
use std::os::windows::io::RawHandle;
use std::time::Duration;

use async_trait::async_trait;

@@ -45,6 +47,13 @@ pub trait WriteAsync {
    ) -> Result<(usize, Vec<u8>)>;
}

/// Trait to wrap around EventAsync, because audio_streams can't depend on anything in `base` or
/// `cros_async`.
#[async_trait(?Send)]
pub trait EventAsyncWrapper {
    async fn wait(&self) -> Result<u64>;
}

pub trait ReadWriteAsync: ReadAsync + WriteAsync {}

pub type AsyncStream = Box<dyn ReadWriteAsync + Send>;

@@ -56,6 +65,15 @@ pub trait AudioStreamsExecutor {
    #[cfg(unix)]
    fn async_unix_stream(&self, f: UnixStream) -> Result<AsyncStream>;

    /// Wraps an event that will be triggered when the audio backend is ready to read more audio
    /// data.
    ///
    /// # Safety
    /// Callers needs to make sure the `RawHandle` is an Event, or at least a valid Handle for
    /// their use case.
    #[cfg(windows)]
    unsafe fn async_event(&self, event: RawHandle) -> Result<Box<dyn EventAsyncWrapper>>;

    /// Returns a future that resolves after the specified time.
    async fn delay(&self, dur: Duration) -> Result<()>;
}

@@ -79,5 +97,9 @@ pub mod test {
            std::thread::sleep(dur);
            return Ok(());
        }
        #[cfg(windows)]
        unsafe fn async_event(&self, _event: RawHandle) -> Result<Box<dyn EventAsyncWrapper>> {
            unimplemented!("async_event is not yet implemented on windows");
        }
    }
}
@@ -9,18 +9,24 @@
use std::io::Result;
#[cfg(unix)]
use std::os::unix::net::UnixStream;
#[cfg(windows)]
use std::os::windows::io::RawHandle;
use std::time::Duration;

use async_trait::async_trait;
#[cfg(unix)]
use audio_streams::async_api::AsyncStream;
use audio_streams::async_api::AudioStreamsExecutor;
use audio_streams::async_api::EventAsyncWrapper;
use audio_streams::async_api::ReadAsync;
use audio_streams::async_api::ReadWriteAsync;
use audio_streams::async_api::WriteAsync;
#[cfg(windows)]
use base::{Event, FromRawDescriptor};

#[cfg(unix)]
use super::AsyncWrapper;
use crate::EventAsync;
use crate::IntoAsync;
use crate::IoSourceExt;
use crate::TimerAsync;

@@ -61,6 +67,13 @@ impl<T: IntoAsync + Send> WriteAsync for IoSourceWrapper<T> {
#[async_trait(?Send)]
impl<T: IntoAsync + Send> ReadWriteAsync for IoSourceWrapper<T> {}

#[async_trait(?Send)]
impl EventAsyncWrapper for EventAsync {
    async fn wait(&self) -> Result<u64> {
        self.next_val().await.map_err(Into::into)
    }
}

#[async_trait(?Send)]
impl AudioStreamsExecutor for super::Executor {
    #[cfg(unix)]

@@ -70,6 +83,16 @@ impl AudioStreamsExecutor for super::Executor {
        }));
    }

    /// # Safety
    /// This is only safe if `event` is a handle to a Windows Event.
    #[cfg(windows)]
    unsafe fn async_event(&self, event: RawHandle) -> Result<Box<dyn EventAsyncWrapper>> {
        Ok(Box::new(
            EventAsync::new(Event::from_raw_descriptor(event), self)
                .map_err(std::io::Error::from)?,
        ))
    }

    async fn delay(&self, dur: Duration) -> Result<()> {
        TimerAsync::sleep(self, dur).await.map_err(Into::into)
    }
@@ -5,8 +5,10 @@ authors = ["The Chromium OS Authors"]
edition = "2021"

[target.'cfg(windows)'.dependencies]
async-trait = "0.1.36"
audio_streams = { path = "../common/audio_streams"}
base = { path = "../base" }
cros_async = { path = "../cros_async" }
libc = "*"
win_util = { path = "../win_util" }
winapi = "*"
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![cfg(windows)]
#![allow(non_upper_case_globals)]
include!(concat!(
    env!("CARGO_MANIFEST_DIR"),

@@ -25,18 +24,37 @@ mod win_audio_impl;
use std::error;
use std::sync::Arc;

use audio_streams::AsyncPlaybackBufferStream;
use audio_streams::NoopStream;
use audio_streams::NoopStreamSource;
use audio_streams::NoopStreamSourceGenerator;
use audio_streams::PlaybackBufferStream;
use audio_streams::SampleFormat;
use audio_streams::StreamSource;
use base::info;
use base::warn;
use sync::Mutex;
use win_audio_impl::*;
use win_audio_impl::async_stream::WinAudioStreamSourceGenerator;
pub use win_audio_impl::*;

pub type BoxError = Box<dyn error::Error + Send + Sync>;

pub trait WinStreamSourceGenerator: Send + Sync {
    fn generate(&self) -> Result<Box<dyn WinAudioServer>, BoxError>;
}

impl WinStreamSourceGenerator for WinAudioStreamSourceGenerator {
    fn generate(&self) -> std::result::Result<Box<dyn WinAudioServer>, BoxError> {
        Ok(Box::new(WinAudio::new()?))
    }
}

impl WinStreamSourceGenerator for NoopStreamSourceGenerator {
    fn generate(&self) -> Result<Box<dyn WinAudioServer>, BoxError> {
        Ok(Box::new(NoopStreamSource))
    }
}

/// Contains information about the audio engine's properties, such as its audio sample format
/// and its period in frames.
///
@@ -66,9 +84,22 @@ pub trait WinAudioServer: StreamSource {
        buffer_size: usize,
    ) -> Result<(Arc<Mutex<Box<dyn PlaybackBufferStream>>>, AudioSharedFormat), BoxError>;

    fn new_async_playback_stream_and_get_shared_format(
        &mut self,
        _num_channels: usize,
        _format: SampleFormat,
        _frame_rate: usize,
        _buffer_size: usize,
        _ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<(Box<dyn AsyncPlaybackBufferStream>, AudioSharedFormat), BoxError> {
        unimplemented!()
    }

    /// Evict the playback stream cache so that the audio device can be released, thus allowing
    /// for machines to go to sleep.
    fn evict_playback_stream_cache(&mut self);
    fn evict_playback_stream_cache(&mut self) {
        unimplemented!()
    }

    /// Returns true if audio server is a noop stream. This determine if evicting a cache is worth
    /// doing
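The default implementations above let existing servers ignore the async path; the cache-eviction hook is the piece tied to power management. A rough caller-side sketch (only the `WinAudioServer` methods come from this hunk; the suspend hook around them is hypothetical):

    // Hypothetical suspend path: before the host sleeps, drop the cached playback
    // stream so Windows can release the audio device.
    fn on_host_suspend(server: &mut dyn WinAudioServer) {
        // Eviction only matters for a real WASAPI-backed server.
        if !server.is_noop_stream() {
            server.evict_playback_stream_cache();
        }
    }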
@@ -140,6 +171,57 @@ impl WinAudioServer for WinAudio {
        Ok((playback_buffer_stream, audio_shared_format))
    }

    fn new_async_playback_stream_and_get_shared_format(
        &mut self,
        num_channels: usize,
        _format: SampleFormat,
        frame_rate: usize,
        buffer_size: usize,
        ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<(Box<dyn AsyncPlaybackBufferStream>, AudioSharedFormat), BoxError> {
        let hr = WinAudio::co_init_once_per_thread();
        let _ = check_hresult!(hr, RenderError::from(hr), "Co Initialized failed");

        let (async_playback_buffer_stream, audio_shared_format): (
            Box<dyn AsyncPlaybackBufferStream>,
            AudioSharedFormat,
        ) = match win_audio_impl::WinAudioRenderer::new_async(
            num_channels,
            frame_rate as u32,
            buffer_size,
            ex,
        ) {
            Ok(renderer) => {
                let audio_shared_format = renderer.device.audio_shared_format;
                let renderer_box = Box::new(renderer) as Box<dyn AsyncPlaybackBufferStream>;
                (renderer_box, audio_shared_format)
            }
            Err(e) => {
                warn!(
                    "Failed to create WinAudioRenderer. Fallback to NoopStream with error: {}",
                    e
                );
                (
                    Box::new(NoopStream::new(
                        num_channels,
                        SampleFormat::S16LE,
                        frame_rate as u32,
                        buffer_size,
                    )),
                    AudioSharedFormat {
                        bit_depth: 16,
                        frame_rate,
                        channels: 2,
                        shared_audio_engine_period_in_frames: frame_rate / 100,
                        channel_mask: None,
                    },
                )
            }
        };

        Ok((async_playback_buffer_stream, audio_shared_format))
    }

    fn evict_playback_stream_cache(&mut self) {
        self.cached_playback_buffer_stream = None;
    }
@@ -172,10 +254,6 @@ impl WinAudioServer for NoopStreamSource {
        ))
    }

    fn evict_playback_stream_cache(&mut self) {
        unimplemented!()
    }

    fn is_noop_stream(&self) -> bool {
        true
    }
win_audio/src/win_audio_impl/async_stream.rs (new file, 254 lines)
@@ -0,0 +1,254 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::MaybeUninit;

use async_trait::async_trait;
use audio_streams::AsyncBufferCommit;
use audio_streams::AsyncPlaybackBuffer;
use audio_streams::AsyncPlaybackBufferStream;
use audio_streams::BoxError;
use audio_streams::NoopStream;
use audio_streams::NoopStreamControl;
use audio_streams::SampleFormat;
use audio_streams::StreamControl;
use audio_streams::StreamSource;
use audio_streams::StreamSourceGenerator;
use base::error;
use base::warn;

use crate::DeviceRenderer;
use crate::RenderError;
use crate::WinAudio;
use crate::WinAudioRenderer;

pub struct WinAudioStreamSourceGenerator {}

impl StreamSourceGenerator for WinAudioStreamSourceGenerator {
    fn generate(&self) -> std::result::Result<Box<dyn StreamSource>, BoxError> {
        Ok(Box::new(WinAudio::new()?))
    }
}

impl WinAudio {
    pub(super) fn new_async_playback_stream_helper(
        num_channels: usize,
        _format: SampleFormat,
        frame_rate: u32,
        buffer_size: usize,
        ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<
        (
            Box<dyn StreamControl>,
            Box<dyn audio_streams::AsyncPlaybackBufferStream>,
        ),
        BoxError,
    > {
        let hr = WinAudio::co_init_once_per_thread();
        let _ = check_hresult!(hr, RenderError::from(hr), "Co Initialized failed");

        let playback_buffer_stream: Box<dyn AsyncPlaybackBufferStream> =
            match WinAudioRenderer::new_async(num_channels, frame_rate, buffer_size, ex) {
                Ok(renderer) => Box::new(renderer),
                Err(e) => {
                    warn!(
                        "Failed to create WinAudioRenderer. Fallback to NoopStream with error: {}",
                        e
                    );
                    Box::new(NoopStream::new(
                        num_channels,
                        SampleFormat::S16LE,
                        frame_rate,
                        buffer_size,
                    ))
                }
            };

        Ok((Box::new(NoopStreamControl::new()), playback_buffer_stream))
    }
}

impl WinAudioRenderer {
    /// Constructor to allow for async audio backend.
    pub fn new_async(
        num_channels: usize,
        frame_rate: u32,
        incoming_buffer_size_in_frames: usize,
        ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<Self, RenderError> {
        let device = Self::create_device_renderer_and_log_time(
            num_channels,
            frame_rate,
            incoming_buffer_size_in_frames,
            Some(ex),
        )?;
        Ok(Self {
            device,
            num_channels,
            frame_rate, // guest frame rate
            incoming_buffer_size_in_frames, // from the guest`
        })
    }
}

#[async_trait(?Send)]
impl AsyncPlaybackBufferStream for WinAudioRenderer {
    async fn next_playback_buffer<'a>(
        &'a mut self,
        _ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<audio_streams::AsyncPlaybackBuffer<'a>, BoxError> {
        for _ in 0..Self::MAX_REATTACH_TRIES {
            match self.device.async_next_win_buffer().await {
                Ok(_) => {
                    return self
                        .device
                        .async_playback_buffer()
                        .map_err(|e| Box::new(e) as _)
                }
                // If the audio device was disconnected, set up whatever is now the default device
                // and loop to try again.
                Err(RenderError::DeviceInvalidated) => {
                    warn!("Audio device disconnected, switching to new default device");
                    self.reattach_device()?;
                }
                Err(e) => return Err(Box::new(e)),
            }
        }
        error!("Unable to attach to a working audio device, giving up");
        Err(Box::new(RenderError::DeviceInvalidated))
    }
}

impl DeviceRenderer {
    /// Similiar to `next_win_buffer`, this is the async version that will return a wraper around
    /// the WASAPI buffer.
    ///
    /// Unlike `next_win_buffer`, there is no timeout if `async_ready_to_read_event` doesn't fire.
    /// This should be fine, since the end result with or without the timeout will be no audio.
    async fn async_next_win_buffer(&mut self) -> Result<(), RenderError> {
        self.win_buffer = MaybeUninit::uninit().as_mut_ptr();

        // We will wait for windows to tell us when it is ready to take in the next set of
        // audio samples from the guest
        loop {
            let async_ready_to_read_event = self
                .async_ready_to_read_event
                .as_ref()
                .ok_or(RenderError::MissingEventAsync)?;
            async_ready_to_read_event.wait().await.map_err(|e| {
                RenderError::AsyncError(
                    e,
                    "Failed to wait for async event to get next playback buffer.".to_string(),
                )
            })?;

            if self.enough_available_frames()? {
                break;
            }
        }

        self.get_buffer()?;

        Ok(())
    }

    /// Similar to `playback_buffer`. This will return an `AsyncPlaybackBuffer` that allows for
    /// async operations.
    ///
    /// Due to the way WASAPI is, `AsyncPlaybackBuffer` won't actually have any async operations
    /// that will await.
    fn async_playback_buffer(&mut self) -> Result<AsyncPlaybackBuffer, RenderError> {
        // Safe because `win_buffer` is allocated and retrieved from WASAPI. The size requested,
        // which we specified in `next_win_buffer` is exactly
        // `shared_audio_engine_period_in_frames`, so the size parameter should be valid.
        let (frame_size_bytes, buffer_slice) = unsafe {
            Self::get_frame_size_and_buffer_slice(
                self.audio_shared_format.bit_depth as usize,
                self.audio_shared_format.channels as usize,
                self.win_buffer,
                self.audio_shared_format
                    .shared_audio_engine_period_in_frames,
            )?
        };

        AsyncPlaybackBuffer::new(frame_size_bytes, buffer_slice, self)
            .map_err(RenderError::PlaybackBuffer)
    }
}

#[async_trait(?Send)]
impl AsyncBufferCommit for DeviceRenderer {
    async fn commit(&mut self, nframes: usize) {
        // Safe because `audio_render_client` is initialized and parameters passed
        // into `ReleaseBuffer()` are valid
        unsafe {
            let hr = self.audio_render_client.ReleaseBuffer(nframes as u32, 0);
            let _ = check_hresult!(
                hr,
                RenderError::from(hr),
                "Audio Render Client ReleaseBuffer() failed"
            );
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use cros_async::Executor;

    // This test is meant to run through the normal audio playback procedure in order to make
    // debugging easier. The test needs to be ran with a playback device format of 48KHz,
    // stereo, 16bit. This test might not pass on AMD, since its period is 513 instead of 480.
    #[ignore]
    #[test]
    fn test_async() {
        async fn test(ex: &Executor) {
            let stream_source_generator: Box<dyn StreamSourceGenerator> =
                Box::new(WinAudioStreamSourceGenerator {});
            let mut stream_source = stream_source_generator
                .generate()
                .expect("Failed to create stream source.");

            let (_, mut async_pb_stream) = stream_source
                .new_async_playback_stream(2, SampleFormat::S16LE, 48000, 480, ex)
                .expect("Failed to create async playback stream.");

            // The `ex` here won't be used, but it will satisfy the trait function requirement.
            let mut async_pb_buffer = async_pb_stream
                .next_playback_buffer(ex)
                .await
                .expect("Failed to get next playback buffer");

            // The buffer size is calculated by "period * channels * bit depth". The actual buffer
            // from `next_playback_buffer` may vary, depending on the device format and the user's
            // machine.
            let buffer = [1u8; 480 * 2 * 2];

            async_pb_buffer
                .copy_cb(buffer.len(), |out| out.copy_from_slice(&buffer))
                .unwrap();

            async_pb_buffer.commit().await;

            let mut async_pb_buffer = async_pb_stream
                .next_playback_buffer(ex)
                .await
                .expect("Failed to get next playback buffer");

            let buffer = [1u8; 480 * 2 * 2];

            async_pb_buffer
                .copy_cb(buffer.len(), |out| out.copy_from_slice(&buffer))
                .unwrap();

            async_pb_buffer.commit().await;
        }

        let ex = Executor::new().expect("Failed to create executor.");

        ex.run_until(test(&ex)).unwrap();
    }
}
@@ -2,9 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

pub mod async_stream;
mod completion_handler;
mod wave_format;

use std::convert::From;
use std::fmt::Debug;
use std::mem::MaybeUninit;
use std::os::raw::c_void;
use std::ptr::null_mut;
use std::sync::Arc;

@@ -12,6 +15,8 @@ use std::sync::Once;
use std::thread_local;
use std::time::Duration;

use audio_streams::async_api::EventAsyncWrapper;
use audio_streams::AsyncPlaybackBufferStream;
use audio_streams::BoxError;
use audio_streams::BufferCommit;
use audio_streams::NoopStream;

@@ -44,6 +49,8 @@ use winapi::shared::mmreg::WAVEFORMATEX;
use winapi::shared::winerror::S_FALSE;
use winapi::shared::winerror::S_OK;
use winapi::um::audioclient::*;
use winapi::um::audiosessiontypes::AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED;
use winapi::um::audiosessiontypes::AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED;
use winapi::um::audiosessiontypes::AUDCLNT_SHAREMODE_SHARED;
use winapi::um::audiosessiontypes::AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
use winapi::um::combaseapi::*;

@@ -62,9 +69,6 @@ use wio::com::ComPtr;

use crate::AudioSharedFormat;

mod completion_handler;
mod wave_format;

const READY_TO_READ_TIMEOUT_MS: u32 = 2000;
pub const STEREO_CHANNEL_COUNT: u16 = 2;
pub const MONO_CHANNEL_COUNT: u16 = 1;

@@ -116,9 +120,9 @@ impl WinAudio {
}

impl StreamSource for WinAudio {
    // Returns a stream control and a buffer generator object. The stream control object is not used.
    // The buffer generator object is a wrapper around WASAPI's objects that will create a buffer for
    // crosvm to copy audio bytes into.
    /// Returns a stream control and a buffer generator object. The stream control object is not
    /// used. The buffer generator object is a wrapper around WASAPI's objects that will create a
    /// buffer for crosvm to copy audio bytes into.
    fn new_playback_stream(
        &mut self,
        num_channels: usize,

@@ -148,6 +152,25 @@ impl StreamSource for WinAudio {

        Ok((Box::new(NoopStreamControl::new()), playback_buffer_stream))
    }

    /// Similar to `new_playback_stream, but will return an `AsyncPlaybackBufferStream` that can
    /// run async operations.
    fn new_async_playback_stream(
        &mut self,
        num_channels: usize,
        format: SampleFormat,
        frame_rate: u32,
        buffer_size: usize,
        ex: &dyn audio_streams::AudioStreamsExecutor,
    ) -> Result<(Box<dyn StreamControl>, Box<dyn AsyncPlaybackBufferStream>), BoxError> {
        WinAudio::new_async_playback_stream_helper(
            num_channels,
            format,
            frame_rate,
            buffer_size,
            ex,
        )
    }
}

/// Proxy for a `DeviceRenderer` that handles device invalidated errors by switching to a new
@@ -160,20 +183,21 @@ pub(crate) struct WinAudioRenderer {
}

impl WinAudioRenderer {
    const MAX_REATTACH_TRIES: usize = 50;

    // Initializes WASAPI objects needed for audio.
    pub fn new(
        num_channels: usize,
        frame_rate: u32,
        incoming_buffer_size_in_frames: usize,
    ) -> Result<Self, RenderError> {
        let start = std::time::Instant::now();
        let device = DeviceRenderer::new(num_channels, frame_rate, incoming_buffer_size_in_frames)?;
        // This can give us insights to how other long other machines take to intialize audio.
        // Eventually this should be a histogram metric.
        info!(
            "DeviceRenderer took {}ms to initialize audio.",
            start.elapsed().as_millis()
        );
        let device = Self::create_device_renderer_and_log_time(
            num_channels,
            frame_rate,
            incoming_buffer_size_in_frames,
            None,
        )?;

        Ok(Self {
            device,
            num_channels,

@@ -182,6 +206,24 @@ impl WinAudioRenderer {
        })
    }

    fn create_device_renderer_and_log_time(
        num_channels: usize,
        frame_rate: u32,
        incoming_buffer_size_in_frames: usize,
        ex: Option<&dyn audio_streams::AudioStreamsExecutor>,
    ) -> Result<DeviceRenderer, RenderError> {
        let start = std::time::Instant::now();
        let device =
            DeviceRenderer::new(num_channels, frame_rate, incoming_buffer_size_in_frames, ex)?;
        // This can give us insights to how other long other machines take to initialize audio.
        // Eventually this should be a histogram metric.
        info!(
            "DeviceRenderer took {}ms to initialize audio.",
            start.elapsed().as_millis()
        );
        Ok(device)
    }

    // Drops the existing DeviceRenderer and initializes a new DeviceRenderer for the default
    // device.
    fn reattach_device(&mut self) -> Result<(), RenderError> {

@@ -189,6 +231,7 @@ impl WinAudioRenderer {
            self.num_channels,
            self.frame_rate,
            self.incoming_buffer_size_in_frames,
            None,
        )?;
        Ok(())
    }

@@ -197,8 +240,7 @@
impl PlaybackBufferStream for WinAudioRenderer {
    /// Returns a wrapper around the WASAPI buffer.
    fn next_playback_buffer<'b, 's: 'b>(&'s mut self) -> Result<PlaybackBuffer<'b>, BoxError> {
        const MAX_REATTACH_TRIES: usize = 50;
        for _ in 0..MAX_REATTACH_TRIES {
        for _ in 0..Self::MAX_REATTACH_TRIES {
            match self.device.next_win_buffer() {
                Ok(_) => return self.device.playback_buffer().map_err(|e| Box::new(e) as _),
                // If the audio device was disconnected, set up whatever is now the default device

@@ -220,10 +262,11 @@ impl PlaybackBufferStream for WinAudioRenderer {
pub(crate) struct DeviceRenderer {
    audio_render_client: ComPtr<IAudioRenderClient>,
    audio_client: ComPtr<IAudioClient>,
    win_buffer: *mut *mut u8,
    win_buffer: *mut u8,
    pub audio_shared_format: AudioSharedFormat,
    audio_render_client_buffer_frame_count: u32,
    ready_to_read_event: Event,
    async_ready_to_read_event: Option<Box<dyn EventAsyncWrapper>>,
}

impl DeviceRenderer {
@@ -232,6 +275,7 @@ impl DeviceRenderer {
        num_channels: usize,
        _guest_frame_rate: u32,
        incoming_buffer_size_in_frames: usize,
        ex: Option<&dyn audio_streams::AudioStreamsExecutor>,
    ) -> Result<Self, RenderError> {
        if num_channels > 2 {
            return Err(RenderError::InvalidChannelCount(num_channels));

@@ -252,7 +296,9 @@ impl DeviceRenderer {
                AUDCLNT_SHAREMODE_SHARED,
                AUDCLNT_STREAMFLAGS_EVENTCALLBACK
                    | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM
                    | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY,
                    | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY
                    | AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED
                    | AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED,
                0, /* hnsBufferDuration */
                0, /* hnsPeriodicity */
                format.as_ptr(),

@@ -271,7 +317,7 @@ impl DeviceRenderer {
        let hr = unsafe { audio_client.SetEventHandle(ready_to_read_event.as_raw_descriptor()) };
        check_hresult!(hr, RenderError::from(hr), "SetEventHandle() failed.")?;

        let audio_render_client = DeviceRenderer::create_audio_render_client(&*audio_client)?;
        let audio_render_client = DeviceRenderer::create_audio_render_client(&audio_client)?;

        let mut shared_default_size_in_100nanoseconds: i64 = 0;
        let mut exclusive_min: i64 = 0;

@@ -322,14 +368,29 @@ impl DeviceRenderer {
            "Audio Render Client Start() failed."
        )?;

        let async_ready_to_read_event = if let Some(ex) = ex {
            // Unsafe if `ready_to_read_event` and `async_ready_to_read_event` have different
            // lifetimes because both can close the underlying `RawDescriptor`. However, both
            // are stored in the `DeviceRenderer` fields, so this is safe.
            Some(unsafe {
                ex.async_event(ready_to_read_event.as_raw_descriptor())
                    .map_err(|e| {
                        RenderError::AsyncError(e, "Failed to create async event".to_string())
                    })?
            })
        } else {
            None
        };

        Ok(Self {
            audio_render_client,
            audio_client,
            win_buffer: MaybeUninit::uninit().as_mut_ptr(),
            win_buffer: std::ptr::null_mut(),
            audio_shared_format: format
                .create_audio_shared_format(shared_audio_engine_period_in_frames),
            audio_render_client_buffer_frame_count,
            ready_to_read_event,
            async_ready_to_read_event,
        })
    }

@@ -366,7 +427,7 @@ impl DeviceRenderer {
        }

        info!("Audio Engine Mix Format Used: \n{:?}", format);
        Self::check_format(&*audio_client, &format, wave_format_details, event_code)?;
        Self::check_format(audio_client, &format, wave_format_details, event_code)?;

        Ok(format)
    }
@@ -506,7 +567,7 @@ impl DeviceRenderer {

        // Safe because `device` is guaranteed to be initialized
        let device = unsafe { ComPtr::from_raw(device) };
        Self::print_device_info(&*device)?;
        Self::print_device_info(&device)?;

        // Call Windows API functions to get the `async_op` which will be used to retrieve the
        // AudioClient. More details above function definition.

@@ -672,8 +733,6 @@ impl DeviceRenderer {

    // Returns a wraper around the WASAPI buffer
    fn next_win_buffer(&mut self) -> Result<(), RenderError> {
        self.win_buffer = MaybeUninit::uninit().as_mut_ptr();

        // We will wait for windows to tell us when it is ready to take in the next set of
        // audio samples from the guest
        loop {

@@ -691,35 +750,53 @@ impl DeviceRenderer {
                );
                break;
            }
            let num_frames_padding: *mut u32 = MaybeUninit::uninit().as_mut_ptr();
            let hr = self.audio_client.GetCurrentPadding(num_frames_padding);
            check_hresult!(
                hr,
                RenderError::from(hr),
                "Audio Client GetCurrentPadding() failed."
            )?;

            // If the available free frames is less than the frames that are being sent over from the guest, then
            // we want to only grab the number of frames available.
            let num_frames_available =
                (self.audio_render_client_buffer_frame_count - *num_frames_padding) as usize;

            if num_frames_available
                >= self
                    .audio_shared_format
                    .shared_audio_engine_period_in_frames
            {
            if self.enough_available_frames()? {
                break;
            }
        }
    }

        self.get_buffer()?;

        Ok(())
    }

    /// Returns true if the number of frames avaialble in the Windows playback buffer is at least
    /// the size of one full period worth of audio samples.
    fn enough_available_frames(&mut self) -> Result<bool, RenderError> {
        // Safe because `num_frames_padding` is an u32 and `GetCurrentPadding` is a simple
        // Windows function that shouldn't fail.
        let mut num_frames_padding = 0u32;
        let hr = unsafe { self.audio_client.GetCurrentPadding(&mut num_frames_padding) };
        check_hresult!(
            hr,
            RenderError::from(hr),
            "Audio Client GetCurrentPadding() failed."
        )?;

        // If the available free frames is less than the frames that are being sent over from the
        // guest, then we want to only grab the number of frames available.
        let num_frames_available =
            (self.audio_render_client_buffer_frame_count - num_frames_padding) as usize;

        Ok(num_frames_available
            >= self
                .audio_shared_format
                .shared_audio_engine_period_in_frames)
    }

    fn get_buffer(&mut self) -> Result<(), RenderError> {
        self.win_buffer = std::ptr::null_mut();

        // This unsafe block will get the playback buffer and return the size of the buffer
        //
        // This is safe because the contents of `win_buffer` will be
        // released when `ReleaseBuffer` is called in the `BufferCommit` implementation.
        unsafe {
            let hr = self.audio_render_client.GetBuffer(
                self.audio_shared_format
                    .shared_audio_engine_period_in_frames as u32,
                self.win_buffer,
                &mut self.win_buffer,
            );
            check_hresult!(
                hr,
@@ -732,29 +809,51 @@ impl DeviceRenderer {
    }

    fn playback_buffer(&mut self) -> Result<PlaybackBuffer, RenderError> {
        if self.win_buffer.is_null() {
            return Err(RenderError::InvalidBuffer);
        }

        let frame_size_bytes = self.audio_shared_format.bit_depth as usize
            * self.audio_shared_format.channels as usize
            / 8;

        // Safe because `win_buffer` is allocated and retrieved from WASAPI. The size requested,
        // which we specified in `next_win_buffer` is exactly
        // `shared_audio_engine_period_in_frames`, so the size parameter should be valid.
        let buffer_slice = unsafe {
            std::slice::from_raw_parts_mut(
                *self.win_buffer,
        let (frame_size_bytes, buffer_slice) = unsafe {
            Self::get_frame_size_and_buffer_slice(
                self.audio_shared_format.bit_depth as usize,
                self.audio_shared_format.channels as usize,
                self.win_buffer,
                self.audio_shared_format
                    .shared_audio_engine_period_in_frames
                    * frame_size_bytes,
            )
                    .shared_audio_engine_period_in_frames,
            )?
        };

        PlaybackBuffer::new(frame_size_bytes, buffer_slice, self)
            .map_err(RenderError::PlaybackBuffer)
    }

    /// # Safety
    ///
    /// Safe only if:
    /// 1. `win_buffer` is pointing to a valid buffer used for holding audio bytes.
    /// 2. `bit_depth`, `channels`, and `shared_audio_engine_period_in_frames` are accurate with
    /// respect to `win_buffer`, so that a valid slice can be made.
    /// 3. The variables mentioned in reason "2." must calculate a size no greater than the size
    /// of the buffer pointed to by `win_buffer`.
    unsafe fn get_frame_size_and_buffer_slice<'a>(
        bit_depth: usize,
        channels: usize,
        win_buffer: *mut u8,
        shared_audio_engine_period_in_frames: usize,
    ) -> Result<(usize, &'a mut [u8]), RenderError> {
        if win_buffer.is_null() {
            return Err(RenderError::InvalidBuffer);
        }

        let frame_size_bytes = bit_depth * channels / 8;

        Ok((
            frame_size_bytes,
            std::slice::from_raw_parts_mut(
                win_buffer,
                shared_audio_engine_period_in_frames * frame_size_bytes,
            ),
        ))
    }
}

impl BufferCommit for DeviceRenderer {

@@ -789,7 +888,7 @@ impl Drop for DeviceRenderer {
    }
}

unsafe impl<'a> Send for DeviceRenderer {}
unsafe impl Send for DeviceRenderer {}

#[derive(Debug, ThisError)]
pub enum RenderError {

@@ -814,6 +913,10 @@ pub enum RenderError {
    GenericError,
    #[error("Invalid guest channel count {0} is > than 2")]
    InvalidChannelCount(usize),
    #[error("Async related error: {0}: {1}")]
    AsyncError(std::io::Error, String),
    #[error("Ready to read async event was not set during win_audio initialization.")]
    MissingEventAsync,
}

impl From<i32> for RenderError {
@@ -877,7 +980,7 @@ mod tests {
    #[test]
    fn test_create_win_audio_renderer_no_co_initliazed() {
        let _shared = SERIALIZE_LOCK.lock();
        let win_audio_renderer = DeviceRenderer::new(2, 48000, 720);
        let win_audio_renderer = DeviceRenderer::new(2, 48000, 720, None);
        assert!(win_audio_renderer.is_err());
    }

@@ -886,7 +989,7 @@ mod tests {
    fn test_create_win_audio_renderer() {
        let _shared = SERIALIZE_LOCK.lock();
        let _co_init = SafeCoInit::new_coinitialize();
        let win_audio_renderer_result = DeviceRenderer::new(2, 48000, 480);
        let win_audio_renderer_result = DeviceRenderer::new(2, 48000, 480, None);
        assert!(win_audio_renderer_result.is_ok());
        let win_audio_renderer = win_audio_renderer_result.unwrap();
        assert_eq!(

@@ -916,7 +1019,7 @@ mod tests {
    // there is no way to copy audio samples over succiently.
    fn test_guest_buffer_size_bigger_than_audio_render_client_buffer_size() {
        let _shared = SERIALIZE_LOCK.lock();
        let win_audio_renderer = DeviceRenderer::new(2, 48000, 100000);
        let win_audio_renderer = DeviceRenderer::new(2, 48000, 100000, None);

        assert!(win_audio_renderer.is_err());
    }
|
@ -55,6 +55,7 @@ impl WaveAudioFormat {
|
|||
/// Unsafe if `wave_format_ptr` is pointing to null. This function will assume it's not null
|
||||
/// and dereference it.
|
||||
/// Also `format_ptr` will be deallocated after this function completes, so it cannot be used.
|
||||
#[allow(clippy::let_and_return)]
|
||||
pub unsafe fn new(format_ptr: *mut WAVEFORMATEX) -> Self {
|
||||
let format_tag = { (*format_ptr).wFormatTag };
|
||||
let result = if format_tag != WAVE_FORMAT_EXTENSIBLE {
|
||||
|
@ -278,12 +279,20 @@ impl Debug for WaveAudioFormat {
|
|||
|
||||
let subformat = wave_format_extensible.SubFormat;
|
||||
|
||||
if !IsEqualGUID(
|
||||
&{ wave_format_extensible.SubFormat },
|
||||
&KSDATAFORMAT_SUBTYPE_IEEE_FLOAT,
|
||||
) {
|
||||
warn!("Audio Engine format is NOT IEEE FLOAT");
|
||||
}
|
||||
// TODO(b/240186720): Passing in `KSDATAFORMAT_SUBTYPE_PCM` will cause a
|
||||
// freeze. IsEqualGUID is unsafe even though it isn't marked as such. Look into
|
||||
// fixing or possibily write our own, that works.
|
||||
//
|
||||
// This check would be a nice to have, but not necessary. Right now, the subformat
|
||||
// used will always be `IEEE_FLOAT`, so this check will be useless if nothing
|
||||
// changes.
|
||||
//
|
||||
// if !IsEqualGUID(
|
||||
// &{ wave_format_extensible.SubFormat },
|
||||
// &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT,
|
||||
// ) {
|
||||
// warn!("Audio Engine format is NOT IEEE FLOAT");
|
||||
// }
|
||||
|
||||
let audio_engine_extensible_format = format!(
|
||||
"\nSamples: {}, \ndwChannelMask: {}, \nSubFormat: {}-{}-{}-{:?}",
|
||||
|
|