media: cros-codecs: vaapi: factorize open() implementations

We can turn open() into a generic method of StreamMetadataState,
provided its generic argument implements a trait supplying the
necessary stream information.

BUG=b:214478588
TEST=cargo test --features vaapi -p cros-codecs

Change-Id: I51efcd7e116accf5532f34de5884a988dc1d0cab
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4123656
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.corp-partner.google.com>
Reviewed-by: Keiichi Watanabe <keiichiw@chromium.org>
Commit-Queue: Alexandre Courbot <acourbot@chromium.org>
Author: Alexandre Courbot
Date: 2022-12-22 17:26:40 +09:00 (committed by crosvm LUCI)
Parent: 9c1fa06e0f
Commit: e057618bbf
4 changed files with 215 additions and 299 deletions
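For illustration, the pattern described above boils down to the following minimal, self-contained Rust sketch. It is not part of the commit: StreamInfo's method set and the generic open() mirror the diff below, but FakeSps, the placeholder return values, and the simplified open() signature (no FormatMap argument, no libva config/context/surface-pool creation) are stand-ins, not crate code.

// Illustrative sketch only: a StreamInfo trait consumed by a single
// generic open(), so each codec backend merely describes its stream.
use anyhow::{anyhow, Result};

/// Simplified version of the StreamInfo trait added to utils/vaapi.rs.
trait StreamInfo {
    fn va_profile(&self) -> Result<i32>;
    fn rt_format(&self) -> Result<u32>;
    fn min_num_surfaces(&self) -> usize;
    fn coded_size(&self) -> (u32, u32);
    fn visible_rect(&self) -> ((u32, u32), (u32, u32));
}

/// Hypothetical codec header standing in for e.g. the H.264 Sps.
struct FakeSps;

impl StreamInfo for &FakeSps {
    fn va_profile(&self) -> Result<i32> { Ok(0) }
    fn rt_format(&self) -> Result<u32> { Ok(0) }
    fn min_num_surfaces(&self) -> usize { 4 }
    fn coded_size(&self) -> (u32, u32) { (1920, 1088) }
    fn visible_rect(&self) -> ((u32, u32), (u32, u32)) { ((0, 0), (1920, 1080)) }
}

/// Simplified stand-in for the real StreamMetadataState enum.
struct StreamMetadataState;

impl StreamMetadataState {
    /// The common setup lives here once; the real method also creates the
    /// VA config, context and surface pool from the queried values.
    fn open<S: StreamInfo>(&mut self, hdr: S) -> Result<()> {
        let _profile = hdr.va_profile()?;
        let _rt_format = hdr.rt_format()?;
        let (width, height) = hdr.coded_size();
        if width == 0 || height == 0 {
            return Err(anyhow!("invalid coded size {}x{}", width, height));
        }
        // ... create the VA config, context and surface pool here ...
        Ok(())
    }
}

fn main() -> Result<()> {
    let mut state = StreamMetadataState;
    state.open(&FakeSps)
}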


@@ -18,7 +18,6 @@ use libva::PictureNew;
use libva::PictureParameter;
use libva::PictureParameterBufferH264;
use libva::SliceParameter;
use libva::UsageHint;
use log::debug;
use crate::decoders::h264::backends::ContainedPicture;
@@ -42,12 +41,11 @@ use crate::decoders::StatelessBackendError;
use crate::decoders::VideoDecoderBackend;
use crate::utils;
use crate::utils::vaapi::DecodedHandle as VADecodedHandle;
use crate::utils::vaapi::FormatMap;
use crate::utils::vaapi::GenericBackendHandle;
use crate::utils::vaapi::NegotiationStatus;
use crate::utils::vaapi::PendingJob;
use crate::utils::vaapi::StreamInfo;
use crate::utils::vaapi::StreamMetadataState;
use crate::utils::vaapi::SurfacePoolHandle;
use crate::DecodedFormat;
use crate::Resolution;
@@ -79,6 +77,58 @@ impl TestParams {
}
}
impl StreamInfo for &Sps {
fn va_profile(&self) -> anyhow::Result<i32> {
let profile_idc = self.profile_idc();
let profile = Profile::n(profile_idc)
.with_context(|| format!("Invalid profile_idc {:?}", profile_idc))?;
match profile {
Profile::Baseline => {
if self.constraint_set0_flag() {
Ok(libva::VAProfile::VAProfileH264ConstrainedBaseline)
} else {
Err(anyhow!(
"Unsupported stream: profile_idc=66, but constraint_set0_flag is unset"
))
}
}
Profile::Main => Ok(libva::VAProfile::VAProfileH264Main),
Profile::High => Ok(libva::VAProfile::VAProfileH264High),
}
}
fn rt_format(&self) -> anyhow::Result<u32> {
let bit_depth_luma = self.bit_depth_chroma_minus8() + 8;
let chroma_format_idc = self.chroma_format_idc();
match bit_depth_luma {
8 => match chroma_format_idc {
0 | 1 => Ok(libva::constants::VA_RT_FORMAT_YUV420),
_ => Err(anyhow!(
"Unsupported chroma_format_idc: {}",
chroma_format_idc
)),
},
_ => Err(anyhow!("Unsupported bit depth: {}", bit_depth_luma)),
}
}
fn min_num_surfaces(&self) -> usize {
self.max_dpb_frames().unwrap() + 4
}
fn coded_size(&self) -> (u32, u32) {
(self.width(), self.height())
}
fn visible_rect(&self) -> ((u32, u32), (u32, u32)) {
let rect = self.visible_rectangle();
((rect.min.x, rect.min.y), (rect.max.x, rect.max.y))
}
}
/// H.264 stateless decoder backend for VA-API.
struct Backend {
/// The metadata state. Updated whenever the decoder reads new data from the stream.
@@ -110,124 +160,6 @@ impl Backend {
})
}
/// Initializes or reinitializes the codec state.
fn open(&mut self, sps: &Sps, format_map: Option<&FormatMap>) -> Result<()> {
let display = self.metadata_state.display();
let profile_idc = sps.profile_idc();
let profile = Profile::n(profile_idc)
.with_context(|| format!("Invalid profile_idc {:?}", profile_idc))?;
let va_profile = Backend::get_profile(profile, sps.constraint_set0_flag())?;
let rt_format =
Backend::get_rt_fmt(sps.bit_depth_chroma_minus8() + 8, sps.chroma_format_idc())?;
let frame_w = sps.width();
let frame_h = sps.height();
let attrs = vec![libva::VAConfigAttrib {
type_: libva::VAConfigAttribType::VAConfigAttribRTFormat,
value: rt_format,
}];
let config =
display.create_config(attrs, va_profile, libva::VAEntrypoint::VAEntrypointVLD)?;
let format_map = if let Some(format_map) = format_map {
format_map
} else {
// Pick the first one that fits
utils::vaapi::FORMAT_MAP
.iter()
.find(|&map| map.rt_format == rt_format)
.ok_or(anyhow!("Unsupported format {}", rt_format))?
};
let map_format = display
.query_image_formats()?
.iter()
.find(|f| f.fourcc == format_map.va_fourcc)
.cloned()
.unwrap();
let num_surfaces = sps.max_dpb_frames()? + 4;
let surfaces = display.create_surfaces(
rt_format,
Some(map_format.fourcc),
frame_w,
frame_h,
Some(UsageHint::USAGE_HINT_DECODER),
num_surfaces as u32,
)?;
let context = display.create_context(
&config,
i32::try_from(frame_w)?,
i32::try_from(frame_h)?,
Some(&surfaces),
true,
)?;
let coded_resolution = Resolution {
width: frame_w,
height: frame_h,
};
let visible_rect = sps.visible_rectangle();
let display_resolution = Resolution {
width: visible_rect.max.x - visible_rect.min.x,
height: visible_rect.max.y - visible_rect.min.y,
};
let surface_pool = SurfacePoolHandle::new(surfaces, coded_resolution);
self.metadata_state = StreamMetadataState::Parsed {
context,
config,
surface_pool,
min_num_surfaces: num_surfaces,
coded_resolution,
display_resolution,
map_format: Rc::new(map_format),
rt_format,
profile: va_profile,
};
Ok(())
}
fn get_profile(profile: Profile, constraint_set0_flag: bool) -> Result<libva::VAProfile::Type> {
match profile {
Profile::Baseline => {
if constraint_set0_flag {
Ok(libva::VAProfile::VAProfileH264ConstrainedBaseline)
} else {
Err(anyhow!(
"Unsupported stream: profile_idc=66, but constraint_set0_flag is unset"
))
}
}
Profile::Main => Ok(libva::VAProfile::VAProfileH264Main),
Profile::High => Ok(libva::VAProfile::VAProfileH264High),
}
}
fn get_rt_fmt(bit_depth_luma: u8, chroma_format_idc: u8) -> Result<u32> {
match bit_depth_luma {
8 => match chroma_format_idc {
0 | 1 => Ok(libva::constants::VA_RT_FORMAT_YUV420),
_ => Err(anyhow!(
"Unsupported chroma_format_idc: {}",
chroma_format_idc
)),
},
_ => Err(anyhow!("Unsupported bit depth: {}", bit_depth_luma)),
}
}
/// Gets the VASurfaceID for the given `picture`.
fn surface_id(picture: &H264Picture<GenericBackendHandle>) -> libva::VASurfaceID {
if picture.nonexisting {
@@ -642,7 +574,7 @@ impl VideoDecoderBackend for Backend {
.unwrap();
let sps = sps.clone();
self.open(&sps, Some(map_format))?;
self.metadata_state.open(sps.as_ref(), Some(map_format))?;
Ok(())
} else {
@@ -738,7 +670,7 @@ impl VideoDecoderBackend for Backend {
impl StatelessDecoderBackend for Backend {
fn new_sequence(&mut self, sps: &Sps) -> StatelessBackendResult<()> {
self.open(sps, None)?;
self.metadata_state.open(sps, None)?;
self.negotiation_status = NegotiationStatus::Possible(Box::new(sps.clone()));
Ok(())


@@ -17,7 +17,6 @@ use libva::IQMatrix;
use libva::IQMatrixBufferVP8;
use libva::Picture as VaPicture;
use libva::ProbabilityDataBufferVP8;
use libva::UsageHint;
use crate::decoders::h264::backends::Result as StatelessBackendResult;
use crate::decoders::vp8::backends::AsBackendHandle;
@@ -37,12 +36,11 @@ use crate::decoders::StatelessBackendError;
use crate::decoders::VideoDecoderBackend;
use crate::utils;
use crate::utils::vaapi::DecodedHandle as VADecodedHandle;
use crate::utils::vaapi::FormatMap;
use crate::utils::vaapi::GenericBackendHandle;
use crate::utils::vaapi::NegotiationStatus;
use crate::utils::vaapi::PendingJob;
use crate::utils::vaapi::StreamInfo;
use crate::utils::vaapi::StreamMetadataState;
use crate::utils::vaapi::SurfacePoolHandle;
use crate::DecodedFormat;
use crate::Resolution;
@@ -61,6 +59,28 @@ struct TestParams {
probability_table: BufferType,
}
impl StreamInfo for &Header {
fn va_profile(&self) -> anyhow::Result<i32> {
Ok(libva::VAProfile::VAProfileVP8Version0_3)
}
fn rt_format(&self) -> anyhow::Result<u32> {
Ok(libva::constants::VA_RT_FORMAT_YUV420)
}
fn min_num_surfaces(&self) -> usize {
NUM_SURFACES
}
fn coded_size(&self) -> (u32, u32) {
(self.width() as u32, self.height() as u32)
}
fn visible_rect(&self) -> ((u32, u32), (u32, u32)) {
((0, 0), self.coded_size())
}
}
struct Backend {
/// The metadata state. Updated whenever the decoder reads new data from the stream.
metadata_state: StreamMetadataState,
@@ -99,81 +119,6 @@ impl Backend {
}
}
/// Initialize the codec state by reading some metadata from the current
/// frame.
fn open(&mut self, frame_hdr: &Header, format_map: Option<&FormatMap>) -> Result<()> {
let display = self.metadata_state.display();
let va_profile = libva::VAProfile::VAProfileVP8Version0_3;
let rt_format = libva::constants::VA_RT_FORMAT_YUV420;
let frame_w = u32::from(frame_hdr.width());
let frame_h = u32::from(frame_hdr.height());
let attrs = vec![libva::VAConfigAttrib {
type_: libva::VAConfigAttribType::VAConfigAttribRTFormat,
value: rt_format,
}];
let config =
display.create_config(attrs, va_profile, libva::VAEntrypoint::VAEntrypointVLD)?;
let format_map = if let Some(format_map) = format_map {
format_map
} else {
// Pick the first one that fits
utils::vaapi::FORMAT_MAP
.iter()
.find(|&map| map.rt_format == rt_format)
.ok_or(anyhow!("Unsupported format {}", rt_format))?
};
let map_format = display
.query_image_formats()?
.iter()
.find(|f| f.fourcc == format_map.va_fourcc)
.cloned()
.unwrap();
let surfaces = display.create_surfaces(
rt_format,
Some(map_format.fourcc),
frame_w,
frame_h,
Some(UsageHint::USAGE_HINT_DECODER),
NUM_SURFACES as u32,
)?;
let context = display.create_context(
&config,
i32::try_from(frame_w)?,
i32::try_from(frame_h)?,
Some(&surfaces),
true,
)?;
let coded_resolution = Resolution {
width: frame_w,
height: frame_h,
};
let surface_pool = SurfacePoolHandle::new(surfaces, coded_resolution);
self.metadata_state = StreamMetadataState::Parsed {
context,
config,
surface_pool,
min_num_surfaces: NUM_SURFACES,
coded_resolution,
display_resolution: coded_resolution, // TODO(dwlsalmeida)
map_format: Rc::new(map_format),
rt_format,
profile: va_profile,
};
Ok(())
}
/// Gets the VASurfaceID for the given `picture`.
fn surface_id(picture: &Vp8Picture<GenericBackendHandle>) -> libva::VASurfaceID {
picture.backend_handle.as_ref().unwrap().surface_id()
@@ -368,7 +313,7 @@ impl Backend {
impl StatelessDecoderBackend for Backend {
fn new_sequence(&mut self, header: &Header) -> StatelessBackendResult<()> {
self.open(header, None)?;
self.metadata_state.open(header, None)?;
self.negotiation_status = NegotiationStatus::Possible(Box::new(header.clone()));
Ok(())
@@ -535,7 +480,8 @@ impl VideoDecoderBackend for Backend {
.find(|&map| map.decoded_format == format)
.unwrap();
self.open(&header, Some(map_format))?;
self.metadata_state
.open(header.as_ref(), Some(map_format))?;
Ok(())
} else {


@@ -13,7 +13,6 @@ use libva::BufferType;
use libva::Display;
use libva::Picture as VaPicture;
use libva::SegmentParameterVP9;
use libva::UsageHint;
use crate::decoders::vp9::backends::AsBackendHandle;
use crate::decoders::vp9::backends::ContainedPicture;
@@ -50,12 +49,11 @@ use crate::decoders::StatelessBackendError;
use crate::decoders::VideoDecoderBackend;
use crate::utils;
use crate::utils::vaapi::DecodedHandle as VADecodedHandle;
use crate::utils::vaapi::FormatMap;
use crate::utils::vaapi::GenericBackendHandle;
use crate::utils::vaapi::NegotiationStatus;
use crate::utils::vaapi::PendingJob;
use crate::utils::vaapi::StreamInfo;
use crate::utils::vaapi::StreamMetadataState;
use crate::utils::vaapi::SurfacePoolHandle;
use crate::DecodedFormat;
use crate::Resolution;
@@ -72,6 +70,38 @@ struct TestParams {
slice_data: BufferType,
}
impl StreamInfo for &Header {
fn va_profile(&self) -> anyhow::Result<i32> {
Ok(match self.profile() {
Profile::Profile0 => libva::VAProfile::VAProfileVP9Profile0,
Profile::Profile1 => libva::VAProfile::VAProfileVP9Profile1,
Profile::Profile2 => libva::VAProfile::VAProfileVP9Profile2,
Profile::Profile3 => libva::VAProfile::VAProfileVP9Profile3,
})
}
fn rt_format(&self) -> anyhow::Result<u32> {
Backend::get_rt_format(
self.profile(),
self.bit_depth(),
self.subsampling_x(),
self.subsampling_y(),
)
}
fn min_num_surfaces(&self) -> usize {
NUM_SURFACES
}
fn coded_size(&self) -> (u32, u32) {
(self.width() as u32, self.height() as u32)
}
fn visible_rect(&self) -> ((u32, u32), (u32, u32)) {
((0, 0), self.coded_size())
}
}
#[derive(Clone, Debug, Default, PartialEq)]
struct Segmentation {
/// Loop filter level
@@ -135,15 +165,6 @@ impl Backend {
}
}
fn get_profile(profile: Profile) -> libva::VAProfile::Type {
match profile {
Profile::Profile0 => libva::VAProfile::VAProfileVP9Profile0,
Profile::Profile1 => libva::VAProfile::VAProfileVP9Profile1,
Profile::Profile2 => libva::VAProfile::VAProfileVP9Profile2,
Profile::Profile3 => libva::VAProfile::VAProfileVP9Profile3,
}
}
fn get_rt_format(
profile: Profile,
bit_depth: BitDepth,
@@ -208,88 +229,6 @@ impl Backend {
}
}
/// Initialize the codec state by reading some metadata from the current
/// frame.
fn open(&mut self, hdr: &Header, format_map: Option<&FormatMap>) -> Result<()> {
let display = self.metadata_state.display();
let hdr_profile = hdr.profile();
let va_profile = Self::get_profile(hdr_profile);
let rt_format = Self::get_rt_format(
hdr_profile,
hdr.bit_depth(),
hdr.subsampling_x(),
hdr.subsampling_y(),
)?;
let frame_w = hdr.width();
let frame_h = hdr.height();
let attrs = vec![libva::VAConfigAttrib {
type_: libva::VAConfigAttribType::VAConfigAttribRTFormat,
value: rt_format,
}];
let config =
display.create_config(attrs, va_profile, libva::VAEntrypoint::VAEntrypointVLD)?;
let format_map = if let Some(format_map) = format_map {
format_map
} else {
// Pick the first one that fits
utils::vaapi::FORMAT_MAP
.iter()
.find(|&map| map.rt_format == rt_format)
.ok_or(anyhow!("Unsupported format {}", rt_format))?
};
let map_format = display
.query_image_formats()?
.iter()
.find(|f| f.fourcc == format_map.va_fourcc)
.cloned()
.unwrap();
let surfaces = display.create_surfaces(
rt_format,
Some(map_format.fourcc),
frame_w,
frame_h,
Some(UsageHint::USAGE_HINT_DECODER),
NUM_SURFACES as u32,
)?;
let context = display.create_context(
&config,
i32::try_from(frame_w)?,
i32::try_from(frame_h)?,
Some(&surfaces),
true,
)?;
let coded_resolution = Resolution {
width: frame_w,
height: frame_h,
};
let surface_pool = SurfacePoolHandle::new(surfaces, coded_resolution);
self.metadata_state = StreamMetadataState::Parsed {
context,
config,
surface_pool,
min_num_surfaces: NUM_SURFACES,
coded_resolution,
display_resolution: coded_resolution, // TODO(dwlsalmeida)
map_format: Rc::new(map_format),
rt_format,
profile: va_profile,
};
Ok(())
}
/// Gets the VASurfaceID for the given `picture`.
fn surface_id(picture: &Vp9Picture<GenericBackendHandle>) -> libva::VASurfaceID {
picture.backend_handle.as_ref().unwrap().surface_id()
@@ -579,7 +518,7 @@ impl Backend {
impl StatelessDecoderBackend for Backend {
fn new_sequence(&mut self, header: &Header) -> StatelessBackendResult<()> {
self.open(header, None)?;
self.metadata_state.open(header, None)?;
self.negotiation_status = NegotiationStatus::Possible(Box::new(header.clone()));
Ok(())
@@ -722,7 +661,8 @@ impl VideoDecoderBackend for Backend {
.find(|&map| map.decoded_format == format)
.unwrap();
self.open(&header, Some(map_format))?;
self.metadata_state
.open(header.as_ref(), Some(map_format))?;
Ok(())
} else {


@@ -248,6 +248,20 @@ impl SurfacePoolHandle {
}
}
/// A trait for providing the basic information needed to setup libva for decoding.
pub(crate) trait StreamInfo {
/// Returns the VA profile of the stream.
fn va_profile(&self) -> anyhow::Result<i32>;
/// Returns the RT format of the stream.
fn rt_format(&self) -> anyhow::Result<u32>;
/// Returns the minimum number of surfaces required to decode the stream.
fn min_num_surfaces(&self) -> usize;
/// Returns the coded size of the surfaces required to decode the stream.
fn coded_size(&self) -> (u32, u32);
/// Returns the visible rectangle within the coded size for the stream.
fn visible_rect(&self) -> ((u32, u32), (u32, u32));
}
/// State of the input stream, which can be either unparsed (we don't know the stream properties
/// yet) or parsed (we know the stream properties and are ready to decode).
pub(crate) enum StreamMetadataState {
@@ -379,6 +393,90 @@ impl StreamMetadataState {
Ok(formats.into_iter().map(|f| f.decoded_format).collect())
}
/// Initializes or reinitializes the codec state.
pub(crate) fn open<S: StreamInfo>(
&mut self,
hdr: S,
format_map: Option<&FormatMap>,
) -> Result<()> {
let display = self.display();
let va_profile = hdr.va_profile()?;
let rt_format = hdr.rt_format()?;
let (frame_w, frame_h) = hdr.coded_size();
let attrs = vec![libva::VAConfigAttrib {
type_: libva::VAConfigAttribType::VAConfigAttribRTFormat,
value: rt_format,
}];
let config =
display.create_config(attrs, va_profile, libva::VAEntrypoint::VAEntrypointVLD)?;
let format_map = if let Some(format_map) = format_map {
format_map
} else {
// Pick the first one that fits
FORMAT_MAP
.iter()
.find(|&map| map.rt_format == rt_format)
.ok_or(anyhow!("Unsupported format {}", rt_format))?
};
let map_format = display
.query_image_formats()?
.iter()
.find(|f| f.fourcc == format_map.va_fourcc)
.cloned()
.unwrap();
let min_num_surfaces = hdr.min_num_surfaces();
let surfaces = display.create_surfaces(
rt_format,
Some(map_format.fourcc),
frame_w,
frame_h,
Some(libva::UsageHint::USAGE_HINT_DECODER),
min_num_surfaces as u32,
)?;
let context = display.create_context(
&config,
i32::try_from(frame_w)?,
i32::try_from(frame_h)?,
Some(&surfaces),
true,
)?;
let coded_resolution = Resolution {
width: frame_w,
height: frame_h,
};
let visible_rect = hdr.visible_rect();
let display_resolution = Resolution {
width: visible_rect.1 .0 - visible_rect.0 .0,
height: visible_rect.1 .1 - visible_rect.0 .1,
};
let surface_pool = SurfacePoolHandle::new(surfaces, coded_resolution);
*self = StreamMetadataState::Parsed {
context,
config,
surface_pool,
min_num_surfaces,
coded_resolution,
display_resolution,
map_format: Rc::new(map_format),
rt_format,
profile: va_profile,
};
Ok(())
}
}
/// The VA-API backend handle.