video: Don't do colorspace conversion in the video backend

This commit is contained in:
TÖRÖK Attila 2023-03-19 21:21:25 +01:00 committed by Mike Welsh
parent 0fab8b3ece
commit 2ad43994b8
6 changed files with 86 additions and 87 deletions

1
Cargo.lock generated
View File

@ -3623,7 +3623,6 @@ dependencies = [
"flate2",
"generational-arena",
"h263-rs",
"h263-rs-yuv",
"log",
"nihav_codec_support",
"nihav_core",

View File

@ -74,8 +74,8 @@ impl Bitmap {
if data.len() != expected_len {
tracing::warn!(
"Incorrect bitmap data size, expected {} bytes, got {}",
expected_len,
data.len(),
expected_len
);
// Truncate or zero pad to the expected size.
data.resize(expected_len, 0);

View File

@ -17,13 +17,12 @@ flate2 = "1.0.25"
log = "0.4"
h263-rs = { git = "https://github.com/ruffle-rs/h263-rs", rev = "d5d78eb251c1ce1f1da57c63db14f0fdc77a4b36", optional = true }
h263-rs-yuv = { git = "https://github.com/ruffle-rs/h263-rs", rev = "d5d78eb251c1ce1f1da57c63db14f0fdc77a4b36", optional = true }
nihav_core = { git = "https://github.com/ruffle-rs/nihav-vp6", rev = "9416fcc9fc8aab8f4681aa9093b42922214abbd3", optional = true }
nihav_codec_support = { git = "https://github.com/ruffle-rs/nihav-vp6", rev = "9416fcc9fc8aab8f4681aa9093b42922214abbd3", optional = true }
nihav_duck = { git = "https://github.com/ruffle-rs/nihav-vp6", rev = "9416fcc9fc8aab8f4681aa9093b42922214abbd3", optional = true }
[features]
default = ["h263", "vp6", "screenvideo"]
h263 = ["h263-rs", "h263-rs-yuv"]
vp6 = ["nihav_core", "nihav_codec_support", "nihav_duck", "h263-rs-yuv"]
h263 = ["h263-rs"]
vp6 = ["nihav_core", "nihav_codec_support", "nihav_duck"]
screenvideo = []

View File

@ -1,7 +1,7 @@
use crate::decoder::VideoDecoder;
use generational_arena::Arena;
use ruffle_render::backend::RenderBackend;
use ruffle_render::bitmap::{Bitmap, BitmapFormat, BitmapHandle, BitmapInfo, PixelRegion};
use ruffle_render::bitmap::{BitmapHandle, BitmapInfo, PixelRegion};
use ruffle_video::backend::VideoBackend;
use ruffle_video::error::Error;
use ruffle_video::frame::{EncodedFrame, FrameDependency};
@ -78,33 +78,22 @@ impl VideoBackend for SoftwareVideoBackend {
.ok_or(Error::VideoStreamIsNotRegistered)?;
let frame = stream.decoder.decode_frame(encoded_frame)?;
let w = frame.width();
let h = frame.height();
let handle = if let Some(bitmap) = stream.bitmap.clone() {
renderer.update_texture(
&bitmap,
Bitmap::new(
frame.width(),
frame.height(),
BitmapFormat::Rgba,
frame.data().to_vec(),
),
PixelRegion::for_whole_size(frame.width(), frame.height()),
)?;
renderer.update_texture(&bitmap, frame, PixelRegion::for_whole_size(w, h))?;
bitmap
} else {
let bitmap = Bitmap::new(
frame.width(),
frame.height(),
BitmapFormat::Rgba,
frame.data().to_vec(),
);
renderer.register_bitmap(bitmap)?
renderer.register_bitmap(frame)?
};
stream.bitmap = Some(handle.clone());
Ok(BitmapInfo {
handle,
width: frame.width() as u16,
height: frame.height() as u16,
width: w as u16,
height: h as u16,
})
}
}

View File

@ -1,7 +1,6 @@
use crate::decoder::VideoDecoder;
use h263_rs::parser::H263Reader;
use h263_rs::{DecoderOption, H263State, PictureTypeCode};
use h263_rs_yuv::bt601::yuv420_to_rgba;
use ruffle_render::bitmap::BitmapFormat;
use ruffle_video::error::Error;
use ruffle_video::frame::{DecodedFrame, EncodedFrame, FrameDependency};
@ -69,15 +68,17 @@ impl VideoDecoder for H263Decoder {
.format()
.into_width_and_height()
.ok_or(H263Error::MissingWidthHeight)?;
let chroma_width = picture.chroma_samples_per_row();
debug_assert_eq!(chroma_width, (width as usize + 1) / 2);
let (y, b, r) = picture.as_yuv();
let rgba = yuv420_to_rgba(y, b, r, width.into());
let mut data = y.to_vec();
data.extend(b);
data.extend(r);
Ok(DecodedFrame::new(
width as u32,
height as u32,
BitmapFormat::Rgba,
rgba,
BitmapFormat::Yuv420p,
data,
))
}
}

View File

@ -2,8 +2,6 @@ use crate::decoder::VideoDecoder;
use ruffle_render::bitmap::BitmapFormat;
use ruffle_video::error::Error;
use h263_rs_yuv::bt601::yuv420_to_rgba;
use nihav_codec_support::codecs::{NABufferRef, NAVideoBuffer, NAVideoInfo};
use nihav_codec_support::codecs::{NABufferType::Video, YUV420_FORMAT};
use nihav_core::codecs::NADecoderSupport;
@ -71,6 +69,31 @@ impl Vp6Decoder {
}
}
/// Crops one plane of pixel data (given as a flat row-major buffer of
/// `width`-pixel rows) down to at most `to_size` = (width, height) pixels.
///
/// Pixels beyond the right edge are removed by packing the surviving row
/// prefixes tightly together; surplus rows at the bottom are then dropped
/// by truncating the buffer. Returns the cropped plane as a new `Vec<u8>`.
fn crop(data: &[u8], mut width: usize, to_size: (u16, u16)) -> Vec<u8> {
    // The buffer must hold a whole number of rows.
    debug_assert!(data.len() % width == 0);
    let (target_w, target_h) = (to_size.0 as usize, to_size.1 as usize);
    let mut height = data.len() / width;
    let mut cropped = data.to_vec();

    if width > target_w {
        // Squish the rows next to each other, keeping only the leftmost
        // `target_w` pixels of each. Row 0 is already in place, and rows
        // past `kept_rows` will be discarded by the truncate below, so
        // neither needs to be moved.
        let kept_rows = usize::min(height, target_h);
        for row in 1..kept_rows {
            let src = row * width..row * width + target_w;
            cropped.copy_within(src, row * target_w);
        }
        width = target_w;
        height = kept_rows;
    }

    // Drop any unwanted bottom rows, along with the slack left at the end
    // of the buffer by the squish above.
    height = usize::min(height, target_h);
    cropped.truncate(width * height);
    cropped
}
impl VideoDecoder for Vp6Decoder {
fn preload_frame(&mut self, encoded_frame: EncodedFrame<'_>) -> Result<FrameDependency, Error> {
// Luckily the very first bit of the encoded frames is exactly this flag,
@ -151,8 +174,6 @@ impl VideoDecoder for Vp6Decoder {
frame
};
// Converting it from YUV420 to RGBA.
let yuv = frame.get_data();
let (mut width, mut height) = frame.get_dimensions(0);
@ -172,32 +193,9 @@ impl VideoDecoder for Vp6Decoder {
frame.get_offset(2),
);
let mut rgba = yuv420_to_rgba(
&yuv[offsets.0..offsets.0 + width * height],
&yuv[offsets.1..offsets.1 + chroma_width * chroma_height],
&yuv[offsets.2..offsets.2 + chroma_width * chroma_height],
width,
);
// Adding in the alpha component, if present.
if self.with_alpha {
debug_assert!(frame.get_stride(3) == frame.get_dimensions(3).0);
let alpha_offset = frame.get_offset(3);
let alpha = &yuv[alpha_offset..alpha_offset + width * height];
for (alpha, rgba) in alpha.iter().zip(rgba.chunks_mut(4)) {
// The SWF spec mandates the `min` to avoid any accidental "invalid"
// premultiplied colors, which would cause strange results after blending.
// And the alpha data is encoded in full range (0-255), unlike the Y
// component of the main color data, so no remapping is needed.
rgba.copy_from_slice(&[
u8::min(rgba[0], *alpha),
u8::min(rgba[1], *alpha),
u8::min(rgba[2], *alpha),
*alpha,
]);
}
}
let y = &yuv[offsets.0..offsets.0 + width * height];
let u = &yuv[offsets.1..offsets.1 + chroma_width * chroma_height];
let v = &yuv[offsets.2..offsets.2 + chroma_width * chroma_height];
// Cropping the encoded frame (containing whole macroblocks) to the
// size requested by the bounds attribute.
@ -209,33 +207,46 @@ impl VideoDecoder for Vp6Decoder {
// Flash Player just produces a black image in this case!
}
if width > bounds.0 as usize {
// Removing the unwanted pixels on the right edge (most commonly: unused pieces of macroblocks)
// by squishing all the rows tightly next to each other.
// Bitmap at the moment does not allow these gaps, so we need to remove them.
let new_width = bounds.0 as usize;
let new_height = usize::min(height, bounds.1 as usize);
// no need to move the first row, nor any rows on the bottom that will end up being cropped entirely
for row in 1..new_height {
rgba.copy_within(
row * width * 4..(row * width + new_width) * 4,
row * new_width * 4,
);
}
width = new_width;
height = new_height;
//(most commonly: unused pieces of macroblocks)
// Bitmap at the moment does not allow these gaps, so we need to remove them.
let y = crop(y, width, bounds);
let u = crop(u, chroma_width, ((bounds.0 + 1) / 2, (bounds.1 + 1) / 2));
let v = crop(v, chroma_width, ((bounds.0 + 1) / 2, (bounds.1 + 1) / 2));
width = bounds.0 as usize;
height = bounds.1 as usize;
// Adding in the alpha component, if present.
if self.with_alpha {
debug_assert!(frame.get_stride(3) == frame.get_dimensions(3).0);
let alpha_offset = frame.get_offset(3);
let alpha = &yuv[alpha_offset..alpha_offset + width * height];
let a = crop(alpha, width, bounds);
let mut data = y.to_vec();
data.extend(u);
data.extend(v);
data.extend(a);
Ok(DecodedFrame::new(
width as u32,
height as u32,
BitmapFormat::Yuva420p,
data,
))
} else {
let mut data = y.to_vec();
data.extend(u);
data.extend(v);
Ok(DecodedFrame::new(
width as u32,
height as u32,
BitmapFormat::Yuv420p,
data,
))
}
// Cropping the unwanted rows on the bottom, also dropping any unused space at the end left by the squish above
height = usize::min(height, bounds.1 as usize);
rgba.truncate(width * height * 4);
Ok(DecodedFrame::new(
width as u32,
height as u32,
BitmapFormat::Rgba,
rgba,
))
}
}