render: Implement more PixelBender features (#11800)

Author: Aaron Hill, 2023-07-01 13:50:43 -04:00 (committed by GitHub)
Parent: 710be15f78
Commit: 6e1f00edf2
67 changed files with 2006 additions and 503 deletions

Cargo.lock (generated)
View File

@ -2844,6 +2844,7 @@ dependencies = [
"anyhow", "anyhow",
"bitflags 2.3.2", "bitflags 2.3.2",
"naga", "naga",
"naga_oil",
"ruffle_render", "ruffle_render",
] ]
@ -3855,6 +3856,7 @@ dependencies = [
"thiserror", "thiserror",
"tracing", "tracing",
"wasm-bindgen", "wasm-bindgen",
"wgpu",
] ]
[[package]] [[package]]

View File

@ -40,6 +40,8 @@ gc-arena = { git = "https://github.com/kyren/gc-arena", rev = "63dab12871321e0e5
tracing = "0.1.37" tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
naga = { version = "0.12.2", features = ["validate", "wgsl-out"] } naga = { version = "0.12.2", features = ["validate", "wgsl-out"] }
naga_oil = "0.7.0"
wgpu = { version = "0.16.1" }
# Don't optimize build scripts and macros. # Don't optimize build scripts and macros.
[profile.release.build-override] [profile.release.build-override]

View File

@ -1,4 +1,8 @@
-use ruffle_render::filters::{DisplacementMapFilter, DisplacementMapFilterMode, Filter};
+use gc_arena::{Collect, DynamicRoot, Rootable};
+use ruffle_render::filters::{
+    DisplacementMapFilter, DisplacementMapFilterMode, Filter, ShaderFilter, ShaderObject,
+};
+use std::fmt::Debug;
use swf::{ use swf::{
BevelFilter, BevelFilterFlags, BlurFilter, BlurFilterFlags, Color, ColorMatrixFilter, BevelFilter, BevelFilterFlags, BlurFilter, BlurFilterFlags, Color, ColorMatrixFilter,
ConvolutionFilter, ConvolutionFilterFlags, DropShadowFilter, DropShadowFilterFlags, Fixed16, ConvolutionFilter, ConvolutionFilterFlags, DropShadowFilter, DropShadowFilterFlags, Fixed16,
@ -8,6 +12,8 @@ use swf::{
use crate::avm2::error::{make_error_2008, type_error}; use crate::avm2::error::{make_error_2008, type_error};
use crate::avm2::{Activation, ArrayObject, ClassObject, Error, Object, TObject, Value}; use crate::avm2::{Activation, ArrayObject, ClassObject, Error, Object, TObject, Value};
use super::globals::flash::display::shader_job::get_shader_args;
pub trait FilterAvm2Ext { pub trait FilterAvm2Ext {
fn from_avm2_object<'gc>( fn from_avm2_object<'gc>(
activation: &mut Activation<'_, 'gc>, activation: &mut Activation<'_, 'gc>,
@ -20,6 +26,26 @@ pub trait FilterAvm2Ext {
) -> Result<Object<'gc>, Error<'gc>>; ) -> Result<Object<'gc>, Error<'gc>>;
} }
#[derive(Clone, Collect)]
#[collect(require_static)]
pub struct ObjectWrapper {
root: DynamicRoot<Rootable![Object<'gc>]>,
}
impl ShaderObject for ObjectWrapper {
fn clone_box(&self) -> Box<dyn ShaderObject> {
Box::new(self.clone())
}
}
impl Debug for ObjectWrapper {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ObjectWrapper")
.field("root", &self.root.as_ptr())
.finish()
}
}
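`Filter` lives in `ruffle_render` and must be `'static`, so it cannot hold a GC-managed `Object<'gc>` directly; `ObjectWrapper` instead stores a `DynamicRoot` handle obtained from the context's `DynamicRootSet`. A compressed sketch of the stash/fetch round trip, using the same calls that appear in `avm2_to_shader_filter` and `shader_filter_to_avm2` further down (the helper names are hypothetical, written as if they lived in this module):

    // Hypothetical helpers illustrating the stash/fetch round trip.
    fn stash_shader_object<'gc>(
        activation: &mut Activation<'_, 'gc>,
        shader_obj: Object<'gc>,
    ) -> ObjectWrapper {
        ObjectWrapper {
            // Stash the GC'd object, getting back a 'static handle that can be
            // stored inside the non-GC ShaderFilter value.
            root: activation
                .context
                .dynamic_root
                .stash(activation.context.gc_context, shader_obj),
        }
    }

    fn fetch_shader_object<'gc>(
        activation: &mut Activation<'_, 'gc>,
        wrapper: &ObjectWrapper,
    ) -> Object<'gc> {
        // Resolve the handle back into an Object tied to the current activation.
        *activation.context.dynamic_root.fetch(&wrapper.root)
    }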
impl FilterAvm2Ext for Filter { impl FilterAvm2Ext for Filter {
fn from_avm2_object<'gc>( fn from_avm2_object<'gc>(
activation: &mut Activation<'_, 'gc>, activation: &mut Activation<'_, 'gc>,
@ -74,6 +100,13 @@ impl FilterAvm2Ext for Filter {
)?)); )?));
} }
let shader_filter = activation.avm2().classes().shaderfilter;
if object.is_of_type(shader_filter, &mut activation.context) {
return Ok(Filter::ShaderFilter(avm2_to_shader_filter(
activation, object,
)?));
}
Err(Error::AvmError(type_error( Err(Error::AvmError(type_error(
activation, activation,
&format!( &format!(
@ -105,6 +138,7 @@ impl FilterAvm2Ext for Filter {
let gradientglowfilter = activation.avm2().classes().gradientglowfilter; let gradientglowfilter = activation.avm2().classes().gradientglowfilter;
gradient_filter_to_avm2(activation, filter, gradientglowfilter) gradient_filter_to_avm2(activation, filter, gradientglowfilter)
} }
Filter::ShaderFilter(filter) => shader_filter_to_avm2(activation, filter),
} }
} }
} }
@ -702,6 +736,66 @@ fn gradient_filter_to_avm2<'gc>(
) )
} }
fn avm2_to_shader_filter<'gc>(
activation: &mut Activation<'_, 'gc>,
object: Object<'gc>,
) -> Result<ShaderFilter<'static>, Error<'gc>> {
let bottom_extension = object
.get_public_property("bottomExtension", activation)?
.coerce_to_i32(activation)?;
let left_extension = object
.get_public_property("leftExtension", activation)?
.coerce_to_i32(activation)?;
let right_extension = object
.get_public_property("rightExtension", activation)?
.coerce_to_i32(activation)?;
let top_extension = object
.get_public_property("topExtension", activation)?
.coerce_to_i32(activation)?;
let shader_obj = object
.get_public_property("shader", activation)?
.as_object()
.unwrap();
let dyn_root = activation
.context
.dynamic_root
.stash(activation.context.gc_context, shader_obj);
let (shader_handle, shader_args) = get_shader_args(shader_obj, activation)?;
Ok(ShaderFilter {
shader_object: Box::new(ObjectWrapper { root: dyn_root }),
shader: shader_handle,
shader_args,
bottom_extension,
left_extension,
right_extension,
top_extension,
})
}
fn shader_filter_to_avm2<'gc>(
activation: &mut Activation<'_, 'gc>,
filter: &ShaderFilter<'static>,
) -> Result<Object<'gc>, Error<'gc>> {
let object_wrapper: &ObjectWrapper = filter
.shader_object
.downcast_ref::<ObjectWrapper>()
.expect("ShaderObject was not an ObjectWrapper");
let obj = *activation.context.dynamic_root.fetch(&object_wrapper.root);
activation
.avm2()
.classes()
.shaderfilter
.construct(activation, &[obj.into()])
}
fn get_gradient_colors<'gc>( fn get_gradient_colors<'gc>(
activation: &mut Activation<'_, 'gc>, activation: &mut Activation<'_, 'gc>,
object: Object<'gc>, object: Object<'gc>,

View File

@ -139,9 +139,11 @@ pub struct SystemClasses<'gc> {
pub cubetexture: ClassObject<'gc>, pub cubetexture: ClassObject<'gc>,
pub rectangletexture: ClassObject<'gc>, pub rectangletexture: ClassObject<'gc>,
pub morphshape: ClassObject<'gc>, pub morphshape: ClassObject<'gc>,
pub shader: ClassObject<'gc>,
pub shaderinput: ClassObject<'gc>, pub shaderinput: ClassObject<'gc>,
pub shaderparameter: ClassObject<'gc>, pub shaderparameter: ClassObject<'gc>,
pub netstatusevent: ClassObject<'gc>, pub netstatusevent: ClassObject<'gc>,
pub shaderfilter: ClassObject<'gc>,
} }
impl<'gc> SystemClasses<'gc> { impl<'gc> SystemClasses<'gc> {
@ -255,9 +257,11 @@ impl<'gc> SystemClasses<'gc> {
cubetexture: object, cubetexture: object,
rectangletexture: object, rectangletexture: object,
morphshape: object, morphshape: object,
shader: object,
shaderinput: object, shaderinput: object,
shaderparameter: object, shaderparameter: object,
netstatusevent: object, netstatusevent: object,
shaderfilter: object,
} }
} }
} }
@ -742,6 +746,7 @@ fn load_playerglobal<'gc>(
("flash.filters", "GlowFilter", glowfilter), ("flash.filters", "GlowFilter", glowfilter),
("flash.filters", "GradientBevelFilter", gradientbevelfilter), ("flash.filters", "GradientBevelFilter", gradientbevelfilter),
("flash.filters", "GradientGlowFilter", gradientglowfilter), ("flash.filters", "GradientGlowFilter", gradientglowfilter),
("flash.filters", "ShaderFilter", shaderfilter),
] ]
); );

View File

@ -4,6 +4,7 @@ package flash.display {
import flash.geom.Point; import flash.geom.Point;
import flash.geom.Matrix; import flash.geom.Matrix;
import flash.filters.BitmapFilter; import flash.filters.BitmapFilter;
import flash.filters.ShaderFilter;
import flash.utils.ByteArray; import flash.utils.ByteArray;
import __ruffle__.stub_method; import __ruffle__.stub_method;
@ -68,6 +69,10 @@ package flash.display {
):int; ):int;
public function generateFilterRect(sourceRect:Rectangle, filter:BitmapFilter):Rectangle { public function generateFilterRect(sourceRect:Rectangle, filter:BitmapFilter):Rectangle {
// Flash always reports that a ShaderFilter affects the entire BitmapData, ignoring sourceRect.
if (filter is ShaderFilter) {
return this.rect.clone();
}
stub_method("flash.display.BitmapData", "generateFilterRect"); stub_method("flash.display.BitmapData", "generateFilterRect");
return sourceRect.clone(); return sourceRect.clone();
} }

View File

@ -9,6 +9,7 @@ use crate::avm2::parameters::{null_parameter_error, ParametersExt};
use crate::avm2::value::Value; use crate::avm2::value::Value;
use crate::avm2::vector::VectorStorage; use crate::avm2::vector::VectorStorage;
use crate::avm2::Error; use crate::avm2::Error;
use crate::avm2_stub_method;
use crate::bitmap::bitmap_data::{ use crate::bitmap::bitmap_data::{
BitmapData, BitmapDataWrapper, ChannelOptions, ThresholdOperation, BitmapData, BitmapDataWrapper, ChannelOptions, ThresholdOperation,
}; };
@ -23,6 +24,7 @@ use gc_arena::GcCell;
use ruffle_render::filters::Filter; use ruffle_render::filters::Filter;
use ruffle_render::transform::Transform; use ruffle_render::transform::Transform;
use std::str::FromStr; use std::str::FromStr;
use swf::{Rectangle, Twips};
// Computes the integer x,y,width,height values from // Computes the integer x,y,width,height values from
// the given `Rectangle`. This method performs `x + width` // the given `Rectangle`. This method performs `x + width`
@ -1051,7 +1053,40 @@ pub fn apply_filter<'gc>(
Error::from(format!("TypeError: Error #1034: Type Coercion failed: cannot convert {} to flash.display.BitmapData.", args[0].coerce_to_string(activation).unwrap_or_default())) Error::from(format!("TypeError: Error #1034: Type Coercion failed: cannot convert {} to flash.display.BitmapData.", args[0].coerce_to_string(activation).unwrap_or_default()))
})?; })?;
let source_rect = args.get_object(activation, 1, "sourceRect")?; let source_rect = args.get_object(activation, 1, "sourceRect")?;
-    let source_rect = super::display_object::object_to_rectangle(activation, source_rect)?;
+    let mut source_rect = super::display_object::object_to_rectangle(activation, source_rect)?;
let filter = args.get_object(activation, 3, "filter")?;
let filter = Filter::from_avm2_object(activation, filter)?;
if matches!(filter, Filter::ShaderFilter(_)) {
let source_bitmap_rect = Rectangle {
x_min: Twips::ZERO,
x_max: Twips::from_pixels(source_bitmap.width() as f64),
y_min: Twips::ZERO,
y_max: Twips::from_pixels(source_bitmap.height() as f64),
};
// Flash performs an odd translation/cropping behavior when sourceRect
// has a non-zero x or y starting value, which I haven't yet managed to reproduce.
//
// Additionally, when both x and y are 0, the 'width' and 'height' seem to
// be ignored completely in favor of using the dimensions of the source
// image (even if a larger or smaller rect is passed in)
//
// To make matters worse, the behavior of ShaderFilter seems platform-dependent
// (or at least resolution-dependent). The test
// 'tests/tests/swfs/avm2/pixelbender_effect_glassDisplace_shaderfilter/test.swf'
// renders slightly differently in Linux vs a Windows VM (part of the mandelbrot fractal
// in the top image is cut off in the Windows Flash Player, but not in the Linux Flash Player)
if source_rect != source_bitmap_rect {
avm2_stub_method!(
activation,
"flash.display.BitmapData",
"applyFilter",
"ShaderFilter with non-standard sourceRect"
);
source_rect = source_bitmap_rect;
}
}
let source_point = ( let source_point = (
source_rect.x_min.to_pixels().floor() as u32, source_rect.x_min.to_pixels().floor() as u32,
source_rect.y_min.to_pixels().floor() as u32, source_rect.y_min.to_pixels().floor() as u32,
@ -1069,8 +1104,7 @@ pub fn apply_filter<'gc>(
.get_public_property("y", activation)? .get_public_property("y", activation)?
.coerce_to_u32(activation)?, .coerce_to_u32(activation)?,
); );
let filter = args.get_object(activation, 3, "filter")?;
let filter = Filter::from_avm2_object(activation, filter)?;
operations::apply_filter( operations::apply_filter(
&mut activation.context, &mut activation.context,
dest_bitmap, dest_bitmap,
@ -1079,7 +1113,7 @@ pub fn apply_filter<'gc>(
source_size, source_size,
dest_point, dest_point,
filter, filter,
-    )
+    );
} }
Ok(Value::Undefined) Ok(Value::Undefined)
} }

View File

@ -1,8 +1,8 @@
use ruffle_render::{ use ruffle_render::{
bitmap::PixelRegion, bitmap::PixelRegion,
pixel_bender::{ pixel_bender::{
-        PixelBenderParam, PixelBenderParamQualifier, PixelBenderShaderArgument, PixelBenderType,
-        OUT_COORD_NAME,
+        PixelBenderParam, PixelBenderParamQualifier, PixelBenderShaderArgument,
+        PixelBenderShaderHandle, PixelBenderType, OUT_COORD_NAME,
}, },
}; };
@ -12,28 +12,19 @@ use crate::{
pixel_bender::PixelBenderTypeExt, pixel_bender::PixelBenderTypeExt,
}; };
-/// Implements `ShaderJob.start`.
-pub fn start<'gc>(
-    activation: &mut Activation<'_, 'gc>,
-    this: Option<Object<'gc>>,
-    _args: &[Value<'gc>],
-) -> Result<Value<'gc>, Error<'gc>> {
-    let this = this.unwrap();
-    avm2_stub_method!(
-        activation,
-        "flash.display.ShaderJob",
-        "start",
-        "async execution and non-BitmapData inputs"
-    );
+pub fn get_shader_args<'gc>(
+    shader_obj: Object<'gc>,
+    activation: &mut Activation<'_, 'gc>,
+) -> Result<
+    (
+        PixelBenderShaderHandle,
+        Vec<PixelBenderShaderArgument<'static>>,
+    ),
+    Error<'gc>,
+> {
     // FIXME - determine what errors Flash Player throws here
     // instead of using `expect`
-    let shader = this
-        .get_public_property("shader", activation)?
-        .as_object()
-        .expect("Missing Shader object");
-    let shader_data = shader
+    let shader_data = shader_obj
.get_public_property("data", activation)? .get_public_property("data", activation)?
.as_object() .as_object()
.expect("Missing ShaderData object") .expect("Missing ShaderData object")
@ -46,7 +37,7 @@ pub fn start<'gc>(
.expect("ShaderData object has no shader"); .expect("ShaderData object has no shader");
let shader = shader_handle.0.parsed_shader(); let shader = shader_handle.0.parsed_shader();
-    let arguments: Vec<_> = shader
+    let args = shader
.params .params
.iter() .iter()
.enumerate() .enumerate()
@ -108,31 +99,58 @@ pub fn start<'gc>(
let input = shader_input let input = shader_input
.get_public_property("input", activation) .get_public_property("input", activation)
.expect("Missing input property"); .expect("Missing input property");
-            let input = input
-                .as_object()
-                .expect("ShaderInput.input is not an object");
-            let bitmap = input.as_bitmap_data().expect(
-                "ShaderInput.input is not a BitmapData (FIXME - support other types)",
-            );
-            // FIXME - this really only needs to be a CPU->GPU sync
-            let bitmap = bitmap.sync();
-            let mut bitmap_data = bitmap.write(activation.context.gc_context);
-            bitmap_data.update_dirty_texture(activation.context.renderer);
+            let texture = if let Value::Null = input {
+                None
+            } else {
+                let input = input
+                    .as_object()
+                    .expect("ShaderInput.input is not an object");
+                let bitmap = input.as_bitmap_data().expect(
+                    "ShaderInput.input is not a BitmapData (FIXME - support other types)",
+                );
+                Some(bitmap.bitmap_handle(
+                    activation.context.gc_context,
+                    activation.context.renderer,
+                ))
+            };
             Some(PixelBenderShaderArgument::ImageInput {
                 index: *index,
                 channels: *channels,
                 name: name.clone(),
-                texture: bitmap_data
-                    .bitmap_handle(activation.context.renderer)
-                    .expect("Missing input BitmapHandle"),
+                texture: texture.map(|t| t.into()),
             })
}) })
} }
} }
}) })
.collect(); .collect();
Ok((shader_handle.clone(), args))
}
/// Implements `ShaderJob.start`.
pub fn start<'gc>(
activation: &mut Activation<'_, 'gc>,
this: Option<Object<'gc>>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
let this = this.unwrap();
avm2_stub_method!(
activation,
"flash.display.ShaderJob",
"start",
"async execution and non-BitmapData inputs"
);
let shader = this
.get_public_property("shader", activation)?
.as_object()
.expect("Missing Shader object");
let (shader_handle, arguments) = get_shader_args(shader, activation)?;
let target = this let target = this
.get_public_property("target", activation)? .get_public_property("target", activation)?
@ -141,7 +159,7 @@ pub fn start<'gc>(
let target_bitmap = target let target_bitmap = target
.as_bitmap_data() .as_bitmap_data()
.expect("ShaderJob.target is not a BitmapData (FIXE - support other types)") .expect("ShaderJob.target is not a BitmapData (FIXME - support other types)")
.sync(); .sync();
// Perform both a GPU->CPU and CPU->GPU sync before writing to it. // Perform both a GPU->CPU and CPU->GPU sync before writing to it.
@ -156,7 +174,7 @@ pub fn start<'gc>(
let sync_handle = activation let sync_handle = activation
.context .context
.renderer .renderer
-        .run_pixelbender_shader(shader_handle.clone(), &arguments, target_handle)
+        .run_pixelbender_shader(shader_handle, &arguments, target_handle)
.expect("Failed to run shader"); .expect("Failed to run shader");
let width = target_bitmap_data.width(); let width = target_bitmap_data.width();

View File

@ -16,6 +16,43 @@ package flash.filters {
this._shader = value; this._shader = value;
} }
private var _bottomExtension:int = 0;
private var _leftExtension:int = 0;
private var _rightExtension:int = 0;
private var _topExtension:int = 0;
public function get bottomExtension():int {
return this._bottomExtension;
}
public function set bottomExtension(value:int):void {
this._bottomExtension = value;
}
public function get leftExtension():int {
return this._leftExtension;
}
public function set leftExtension(value:int):void {
this._leftExtension = value;
}
public function get rightExtension():int {
return this._rightExtension;
}
public function set rightExtension(value:int):void {
this._rightExtension = value;
}
public function get topExtension():int {
return this._topExtension;
}
public function set topExtension(value:int):void {
this._topExtension = value;
}
// ShaderFilter is the only filter class that doesn't override clone // ShaderFilter is the only filter class that doesn't override clone
} }
} }

View File

@ -218,7 +218,6 @@ pub struct UpdateContext<'a, 'gc> {
pub stream_manager: &'a mut StreamManager<'gc>, pub stream_manager: &'a mut StreamManager<'gc>,
/// Dynamic root for allowing handles to GC objects to exist outside of the GC. /// Dynamic root for allowing handles to GC objects to exist outside of the GC.
#[cfg(feature = "egui")]
pub dynamic_root: gc_arena::DynamicRootSet<'gc>, pub dynamic_root: gc_arena::DynamicRootSet<'gc>,
} }
@ -377,7 +376,6 @@ impl<'a, 'gc> UpdateContext<'a, 'gc> {
actions_since_timeout_check: self.actions_since_timeout_check, actions_since_timeout_check: self.actions_since_timeout_check,
frame_phase: self.frame_phase, frame_phase: self.frame_phase,
stream_manager: self.stream_manager, stream_manager: self.stream_manager,
#[cfg(feature = "egui")]
dynamic_root: self.dynamic_root, dynamic_root: self.dynamic_root,
} }
} }

View File

@ -36,6 +36,9 @@ impl PixelBenderTypeExt for PixelBenderType {
| PixelBenderTypeOpcode::TFloat2 | PixelBenderTypeOpcode::TFloat2
| PixelBenderTypeOpcode::TFloat3 | PixelBenderTypeOpcode::TFloat3
| PixelBenderTypeOpcode::TFloat4 | PixelBenderTypeOpcode::TFloat4
| PixelBenderTypeOpcode::TFloat2x2
| PixelBenderTypeOpcode::TFloat3x3
| PixelBenderTypeOpcode::TFloat4x4
); );
match value { match value {
@ -69,6 +72,15 @@ impl PixelBenderTypeExt for PixelBenderType {
vals.next().unwrap(), vals.next().unwrap(),
vals.next().unwrap(), vals.next().unwrap(),
)), )),
PixelBenderTypeOpcode::TFloat2x2 => Ok(PixelBenderType::TFloat2x2(
vals.collect::<Vec<_>>().try_into().unwrap(),
)),
PixelBenderTypeOpcode::TFloat3x3 => Ok(PixelBenderType::TFloat3x3(
vals.collect::<Vec<_>>().try_into().unwrap(),
)),
PixelBenderTypeOpcode::TFloat4x4 => Ok(PixelBenderType::TFloat4x4(
vals.collect::<Vec<_>>().try_into().unwrap(),
)),
_ => unreachable!("Unexpected float kind {kind:?}"), _ => unreachable!("Unexpected float kind {kind:?}"),
} }
} else { } else {
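The `try_into().unwrap()` calls above assume the parameter payload carries exactly as many floats as the matrix has entries (presumably 4 for 2x2, 9 for 3x3, 16 for 4x4); any other length panics. A tiny self-contained sketch of that conversion shape (illustrative names, not from this commit):

    // Collecting a Vec of floats into a fixed-size array, as done above for
    // TFloat2x2 / TFloat3x3 / TFloat4x4. A length mismatch makes try_into fail.
    fn to_matrix2x2(vals: Vec<f32>) -> [f32; 4] {
        vals.try_into().expect("TFloat2x2 expects exactly 4 floats")
    }

    fn main() {
        assert_eq!(to_matrix2x2(vec![1.0, 0.0, 0.0, 1.0]), [1.0, 0.0, 0.0, 1.0]);
    }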

View File

@ -1814,7 +1814,6 @@ impl Player {
frame_phase: &mut self.frame_phase, frame_phase: &mut self.frame_phase,
stub_tracker: &mut self.stub_tracker, stub_tracker: &mut self.stub_tracker,
stream_manager, stream_manager,
#[cfg(feature = "egui")]
dynamic_root, dynamic_root,
}; };

View File

@ -32,7 +32,7 @@ bytemuck = "1.13.1"
os_info = { version = "3", default-features = false } os_info = { version = "3", default-features = false }
unic-langid = "0.9.1" unic-langid = "0.9.1"
sys-locale = "0.3.0" sys-locale = "0.3.0"
wgpu = { version = "0.16.1" } wgpu = { workspace = true }
futures = "0.3.28" futures = "0.3.28"
chrono = { version = "0.4", default-features = false, features = [] } chrono = { version = "0.4", default-features = false, features = [] }
fluent-templates = "0.8.0" fluent-templates = "0.8.0"

View File

@ -28,6 +28,7 @@ lru = "0.10.0"
num-traits = "0.2" num-traits = "0.2"
num-derive = "0.3" num-derive = "0.3"
byteorder = "1.4" byteorder = "1.4"
wgpu = { workspace = true, optional = true }
[dependencies.jpeg-decoder] [dependencies.jpeg-decoder]
version = "0.3.0" version = "0.3.0"
@ -40,3 +41,4 @@ approx = "0.5.1"
default = [] default = []
tessellator = ["lyon"] tessellator = ["lyon"]
web = ["wasm-bindgen"] web = ["wasm-bindgen"]
wgpu = ["dep:wgpu"]

View File

@ -10,6 +10,7 @@ version.workspace = true
[dependencies] [dependencies]
ruffle_render = { path = "../" } ruffle_render = { path = "../" }
naga = { workspace = true } naga = { workspace = true }
naga_oil = { workspace = true }
anyhow = "1.0.71" anyhow = "1.0.71"
bitflags = "2.3.2" bitflags = "2.3.2"

File diff suppressed because it is too large

View File

@ -120,6 +120,12 @@ impl_downcast!(ShaderModule);
pub trait Texture: Downcast + Collect {} pub trait Texture: Downcast + Collect {}
impl_downcast!(Texture); impl_downcast!(Texture);
pub trait RawTexture: Downcast + Debug {}
impl_downcast!(RawTexture);
#[cfg(feature = "wgpu")]
impl RawTexture for wgpu::Texture {}
#[derive(Collect, Debug, Copy, Clone)] #[derive(Collect, Debug, Copy, Clone)]
#[collect(require_static)] #[collect(require_static)]
pub enum Context3DTextureFormat { pub enum Context3DTextureFormat {

View File

@ -1,4 +1,10 @@
-use crate::bitmap::BitmapHandle;
+use crate::{
+    bitmap::BitmapHandle,
+    pixel_bender::{PixelBenderShaderArgument, PixelBenderShaderHandle},
+};
+use downcast_rs::{impl_downcast, Downcast};
+use gc_arena::Collect;
+use std::fmt::Debug;
use swf::Color; use swf::Color;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -12,6 +18,33 @@ pub enum Filter {
GlowFilter(swf::GlowFilter), GlowFilter(swf::GlowFilter),
GradientBevelFilter(swf::GradientFilter), GradientBevelFilter(swf::GradientFilter),
GradientGlowFilter(swf::GradientFilter), GradientGlowFilter(swf::GradientFilter),
ShaderFilter(ShaderFilter<'static>),
}
#[derive(Debug, Clone)]
pub struct ShaderFilter<'a> {
pub bottom_extension: i32,
pub left_extension: i32,
pub right_extension: i32,
pub top_extension: i32,
/// The AVM2 `flash.display.Shader` object that we extracted
/// the `shader` and `shader_args` fields from. This is used when
/// we reconstruct a `ShaderFilter` object in the AVM2 `DisplayObject.filters`
/// (Flash re-uses the same object)
pub shader_object: Box<dyn ShaderObject>,
pub shader: PixelBenderShaderHandle,
pub shader_args: Vec<PixelBenderShaderArgument<'a>>,
}
pub trait ShaderObject: Downcast + Collect + Debug {
fn clone_box(&self) -> Box<dyn ShaderObject>;
}
impl_downcast!(ShaderObject);
impl Clone for Box<dyn ShaderObject> {
fn clone(&self) -> Self {
self.clone_box()
}
} }
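The `clone_box` method plus the manual `Clone` impl for `Box<dyn ShaderObject>` is the standard object-safe clone pattern: `Clone::clone` returns `Self`, so it cannot appear on a trait object directly. A self-contained sketch of the pattern outside of Ruffle (the `DummyShaderObject` type is illustrative, not part of this commit):

    use std::fmt::Debug;

    // Object-safe trait: `Clone` cannot be a supertrait here because its
    // `clone` returns `Self`, which is not allowed on a trait object.
    trait ShaderObject: Debug {
        fn clone_box(&self) -> Box<dyn ShaderObject>;
    }

    // Forward `Clone` on the boxed trait object to `clone_box`.
    impl Clone for Box<dyn ShaderObject> {
        fn clone(&self) -> Self {
            self.clone_box()
        }
    }

    // Hypothetical implementor, standing in for `ObjectWrapper` above.
    #[derive(Clone, Debug)]
    struct DummyShaderObject(u32);

    impl ShaderObject for DummyShaderObject {
        fn clone_box(&self) -> Box<dyn ShaderObject> {
            Box::new(self.clone())
        }
    }

    fn main() {
        let a: Box<dyn ShaderObject> = Box::new(DummyShaderObject(7));
        let b = a.clone(); // works because of the blanket `Clone` impl above
        println!("{a:?} {b:?}");
    }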
impl From<&swf::Filter> for Filter { impl From<&swf::Filter> for Filter {

View File

@ -6,6 +6,7 @@ pub mod error;
pub mod filters; pub mod filters;
pub mod matrix; pub mod matrix;
pub mod pixel_bender; pub mod pixel_bender;
pub mod shader_source;
pub mod shape_utils; pub mod shape_utils;
pub mod transform; pub mod transform;
pub mod utils; pub mod utils;

View File

@ -14,7 +14,7 @@ use std::{
sync::Arc, sync::Arc,
}; };
-use crate::bitmap::BitmapHandle;
+use crate::{backend::RawTexture, bitmap::BitmapHandle};
/// The name of a special parameter, which gets automatically filled in with the coordinates /// The name of a special parameter, which gets automatically filled in with the coordinates
/// of the pixel being processed. /// of the pixel being processed.
@ -69,9 +69,17 @@ pub enum PixelBenderRegChannel {
G = 1, G = 1,
B = 2, B = 2,
A = 3, A = 3,
M2x2 = 4,
M3x3 = 5,
M4x4 = 6,
} }
impl PixelBenderRegChannel { impl PixelBenderRegChannel {
pub const RGB: [PixelBenderRegChannel; 3] = [
PixelBenderRegChannel::R,
PixelBenderRegChannel::G,
PixelBenderRegChannel::B,
];
pub const RGBA: [PixelBenderRegChannel; 4] = [ pub const RGBA: [PixelBenderRegChannel; 4] = [
PixelBenderRegChannel::R, PixelBenderRegChannel::R,
PixelBenderRegChannel::G, PixelBenderRegChannel::G,
@ -229,12 +237,12 @@ pub enum Operation {
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
-pub enum PixelBenderShaderArgument {
+pub enum PixelBenderShaderArgument<'a> {
     ImageInput {
         index: u8,
         channels: u8,
         name: String,
-        texture: BitmapHandle,
+        texture: Option<ImageInputTexture<'a>>,
     },
ValueInput { ValueInput {
index: u8, index: u8,
@ -242,6 +250,28 @@ pub enum PixelBenderShaderArgument {
}, },
} }
/// An image input. This accepts both an owned BitmapHandle,
/// and a borrowed texture (used when applying a filter to
/// a texture that we don't have ownership of, and therefore
/// cannot construct a BitmapHandle for).
#[derive(Debug, Clone)]
pub enum ImageInputTexture<'a> {
Bitmap(BitmapHandle),
TextureRef(&'a dyn RawTexture),
}
impl From<BitmapHandle> for ImageInputTexture<'_> {
fn from(b: BitmapHandle) -> Self {
ImageInputTexture::Bitmap(b)
}
}
impl<'a> From<&'a dyn RawTexture> for ImageInputTexture<'a> {
fn from(t: &'a dyn RawTexture) -> Self {
ImageInputTexture::TextureRef(t)
}
}
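The two `From` impls above let callers pass either form with a plain `.into()`. A hedged sketch of how the `ImageInput` arguments might be built (the helper, the parameter names, the `"src"`/`"background"` names, and `channels: 4` are illustrative, not from this commit):

    use ruffle_render::backend::RawTexture;
    use ruffle_render::bitmap::BitmapHandle;
    use ruffle_render::pixel_bender::PixelBenderShaderArgument;

    // Hypothetical helper: builds one owned and one borrowed image input.
    fn image_args<'a>(
        bitmap_handle: BitmapHandle,
        raw_texture: &'a dyn RawTexture,
    ) -> [PixelBenderShaderArgument<'a>; 2] {
        [
            PixelBenderShaderArgument::ImageInput {
                index: 0,
                channels: 4, // illustrative: RGBA
                name: "src".to_string(),
                // From<BitmapHandle> yields ImageInputTexture::Bitmap
                texture: Some(bitmap_handle.into()),
            },
            PixelBenderShaderArgument::ImageInput {
                index: 1,
                channels: 4,
                name: "background".to_string(),
                // From<&dyn RawTexture> yields ImageInputTexture::TextureRef
                texture: Some(raw_texture.into()),
            },
        ]
    }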
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct PixelBenderShader { pub struct PixelBenderShader {
pub name: String, pub name: String,
@ -292,14 +322,17 @@ pub fn parse_shader(mut data: &[u8]) -> Result<PixelBenderShader, Box<dyn std::e
Ok(shader) Ok(shader)
} }
-fn read_src_reg(val: u32, size: u8) -> Result<PixelBenderReg, Box<dyn std::error::Error>> {
-    const CHANNELS: [PixelBenderRegChannel; 4] = [
-        PixelBenderRegChannel::R,
-        PixelBenderRegChannel::G,
-        PixelBenderRegChannel::B,
-        PixelBenderRegChannel::A,
-    ];
+const CHANNELS: [PixelBenderRegChannel; 7] = [
+    PixelBenderRegChannel::R,
+    PixelBenderRegChannel::G,
+    PixelBenderRegChannel::B,
+    PixelBenderRegChannel::A,
+    PixelBenderRegChannel::M2x2,
+    PixelBenderRegChannel::M3x3,
+    PixelBenderRegChannel::M4x4,
+];
+
+fn read_src_reg(val: u32, size: u8) -> Result<PixelBenderReg, Box<dyn std::error::Error>> {
let swizzle = val >> 16; let swizzle = val >> 16;
let mut channels = Vec::new(); let mut channels = Vec::new();
for i in 0..size { for i in 0..size {
@ -320,6 +353,10 @@ fn read_src_reg(val: u32, size: u8) -> Result<PixelBenderReg, Box<dyn std::error
}) })
} }
fn read_matrix_reg(val: u16, mask: u8) -> PixelBenderReg {
read_reg(val, vec![CHANNELS[(mask + 3) as usize]])
}
fn read_dst_reg(val: u16, mask: u8) -> Result<PixelBenderReg, Box<dyn std::error::Error>> { fn read_dst_reg(val: u16, mask: u8) -> Result<PixelBenderReg, Box<dyn std::error::Error>> {
let mut channels = Vec::new(); let mut channels = Vec::new();
if mask & 0x8 != 0 { if mask & 0x8 != 0 {
@ -335,18 +372,22 @@ fn read_dst_reg(val: u16, mask: u8) -> Result<PixelBenderReg, Box<dyn std::error
channels.push(PixelBenderRegChannel::A); channels.push(PixelBenderRegChannel::A);
} }
Ok(read_reg(val, channels))
}
fn read_reg(val: u16, channels: Vec<PixelBenderRegChannel>) -> PixelBenderReg {
let kind = if val & 0x8000 != 0 { let kind = if val & 0x8000 != 0 {
PixelBenderRegKind::Int PixelBenderRegKind::Int
} else { } else {
PixelBenderRegKind::Float PixelBenderRegKind::Float
}; };
-    Ok(PixelBenderReg {
+    PixelBenderReg {
         // Mask off the 0x8000 bit
         index: (val & 0x7FFF) as u32,
         channels,
         kind,
-    })
+    }
}) }
} }
fn read_op<R: Read>( fn read_op<R: Read>(
@ -385,21 +426,45 @@ fn read_op<R: Read>(
let param_type = PixelBenderTypeOpcode::from_u8(param_type).unwrap_or_else(|| { let param_type = PixelBenderTypeOpcode::from_u8(param_type).unwrap_or_else(|| {
panic!("Unexpected param type {param_type}"); panic!("Unexpected param type {param_type}");
}); });
// Note - we deviate from Haxe's parser code here. We assert that the provided mask value
// is as expected, but we then construct a Matrix channel register as the dest reg,
// which helps our naga-pixelbender backend.
let dst_reg = match param_type {
PixelBenderTypeOpcode::TFloat2x2 => {
assert_eq!(mask, 2);
PixelBenderReg {
index: reg as u32,
channels: vec![PixelBenderRegChannel::M2x2],
kind: PixelBenderRegKind::Float,
}
}
PixelBenderTypeOpcode::TFloat3x3 => {
assert_eq!(mask, 3);
PixelBenderReg {
index: reg as u32,
channels: vec![PixelBenderRegChannel::M3x3],
kind: PixelBenderRegKind::Float,
}
}
PixelBenderTypeOpcode::TFloat4x4 => {
assert_eq!(mask, 4);
PixelBenderReg {
index: reg as u32,
channels: vec![PixelBenderRegChannel::M4x4],
kind: PixelBenderRegKind::Float,
}
}
_ => {
assert_eq!(mask >> 4, 0);
read_dst_reg(reg, mask)?
}
};
let qualifier = PixelBenderParamQualifier::from_u8(qualifier) let qualifier = PixelBenderParamQualifier::from_u8(qualifier)
.unwrap_or_else(|| panic!("Unexpected param qualifier {qualifier:?}")); .unwrap_or_else(|| panic!("Unexpected param qualifier {qualifier:?}"));
apply_metadata(shader, metadata); apply_metadata(shader, metadata);
match param_type {
PixelBenderTypeOpcode::TFloat2x2
| PixelBenderTypeOpcode::TFloat3x3
| PixelBenderTypeOpcode::TFloat4x4 => {
panic!("Unsupported param type {param_type:?}");
}
_ => {}
}
let dst_reg = read_dst_reg(reg, mask)?;
shader.params.push(PixelBenderParam::Normal { shader.params.push(PixelBenderParam::Normal {
qualifier, qualifier,
param_type, param_type,
@ -498,19 +563,26 @@ fn read_op<R: Read>(
assert_eq!(data.read_u8()?, 0, "Unexpected u8 for opcode {opcode:?}"); assert_eq!(data.read_u8()?, 0, "Unexpected u8 for opcode {opcode:?}");
mask >>= 4; mask >>= 4;
-            let src_reg = read_src_reg(src, size)?;
-            let dst_reg = if matrix != 0 {
-                assert_eq!(src >> 16, 0);
-                assert_eq!(size, 1);
-                panic!("Matrix with mask {mask:b} matrix {matrix:b}");
-            } else {
-                read_dst_reg(dst, mask)?
-            };
-            shader.operations.push(Operation::Normal {
-                opcode,
-                dst: dst_reg,
-                src: src_reg,
-            })
+            if matrix != 0 {
+                assert_eq!(src >> 16, 0);
+                assert_eq!(size, 1);
+                let dst = if mask == 0 {
+                    read_matrix_reg(dst, matrix)
+                } else {
+                    read_dst_reg(dst, mask)?
+                };
+                shader.operations.push(Operation::Normal {
+                    opcode,
+                    dst,
+                    src: read_matrix_reg(src as u16, matrix),
+                });
+            } else {
+                let dst = read_dst_reg(dst, mask)?;
+                let src = read_src_reg(src, size)?;
+                shader
+                    .operations
+                    .push(Operation::Normal { opcode, dst, src })
+            };
} }
}; };
Ok(()) Ok(())

View File

@ -1,5 +1,4 @@
#define_import_path filter #define_import_path filter
#import common
struct VertexOutput { struct VertexOutput {
@builtin(position) position: vec4<f32>, @builtin(position) position: vec4<f32>,

View File

@ -0,0 +1 @@
pub const SHADER_FILTER_COMMON: &str = include_str!("shader_filter_common.wgsl");

View File

@ -8,9 +8,9 @@ repository.workspace = true
version.workspace = true version.workspace = true
[dependencies] [dependencies]
wgpu = { version = "0.16", features = ["naga"] } wgpu = { workspace = true, features = ["naga"] }
tracing = { workspace = true } tracing = { workspace = true }
ruffle_render = { path = "..", features = ["tessellator"] } ruffle_render = { path = "..", features = ["tessellator", "wgpu"] }
bytemuck = { version = "1.13.1", features = ["derive"] } bytemuck = { version = "1.13.1", features = ["derive"] }
raw-window-handle = "0.5" raw-window-handle = "0.5"
clap = { version = "4.3.8", features = ["derive"], optional = true } clap = { version = "4.3.8", features = ["derive"], optional = true }
@ -18,7 +18,7 @@ enum-map = "2.5.0"
fnv = "1.0.7" fnv = "1.0.7"
swf = { path = "../../swf" } swf = { path = "../../swf" }
image = { version = "0.24.6", default-features = false } image = { version = "0.24.6", default-features = false }
naga_oil = "0.7.0" naga_oil = { workspace = true }
ouroboros = "0.17.0" ouroboros = "0.17.0"
typed-arena = "2.0.2" typed-arena = "2.0.2"
gc-arena = { workspace = true } gc-arena = { workspace = true }

View File

@ -3,6 +3,7 @@ use crate::buffer_pool::{BufferPool, TexturePool};
use crate::context3d::WgpuContext3D; use crate::context3d::WgpuContext3D;
use crate::filters::FilterSource; use crate::filters::FilterSource;
use crate::mesh::{Mesh, PendingDraw}; use crate::mesh::{Mesh, PendingDraw};
use crate::pixel_bender::{run_pixelbender_shader_impl, ShaderMode};
use crate::surface::{LayerRef, Surface}; use crate::surface::{LayerRef, Surface};
use crate::target::{MaybeOwnedBuffer, TextureTarget}; use crate::target::{MaybeOwnedBuffer, TextureTarget};
use crate::target::{RenderTargetFrame, TextureBufferInfo}; use crate::target::{RenderTargetFrame, TextureBufferInfo};
@ -34,6 +35,7 @@ use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use swf::Color; use swf::Color;
use tracing::instrument; use tracing::instrument;
use wgpu::SubmissionIndex;
/// How many times a texture must be written to & read back from, /// How many times a texture must be written to & read back from,
/// before it's automatically allocated a buffer on each write. /// before it's automatically allocated a buffer on each write.
@ -309,6 +311,58 @@ impl<T: RenderTarget> WgpuRenderBackend<T> {
pub fn device(&self) -> &wgpu::Device { pub fn device(&self) -> &wgpu::Device {
&self.descriptors.device &self.descriptors.device
} }
pub fn make_queue_sync_handle(
&self,
target: TextureTarget,
index: SubmissionIndex,
destination: BitmapHandle,
copy_area: PixelRegion,
) -> Box<QueueSyncHandle> {
match target.take_buffer() {
None => Box::new(QueueSyncHandle::NotCopied {
handle: destination,
copy_area,
descriptors: self.descriptors.clone(),
pool: self.offscreen_buffer_pool.clone(),
}),
Some(TextureBufferInfo {
buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
..
}) => Box::new(QueueSyncHandle::AlreadyCopied {
index,
buffer,
copy_dimensions,
descriptors: self.descriptors.clone(),
}),
Some(TextureBufferInfo {
buffer: MaybeOwnedBuffer::Owned(..),
..
}) => unreachable!("Buffer must be Borrowed as it was set to be Borrowed earlier"),
}
}
fn get_texture_buffer_info(
&self,
texture: &Texture,
copy_area: PixelRegion,
) -> Option<TextureBufferInfo> {
if texture.copy_count.get() >= TEXTURE_READS_BEFORE_PROMOTION {
let copy_dimensions = BufferDimensions::new(
texture.texture.width() as usize,
texture.texture.height() as usize,
);
let buffer = self
.offscreen_buffer_pool
.take(&self.descriptors, copy_dimensions.clone());
Some(TextureBufferInfo {
buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
copy_area,
})
} else {
None
}
}
} }
impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> { impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
@ -685,19 +739,7 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
depth_or_array_layers: 1, depth_or_array_layers: 1,
}; };
-        let buffer_info = if texture.copy_count.get() > TEXTURE_READS_BEFORE_PROMOTION {
-            let copy_dimensions =
-                BufferDimensions::new(bounds.width() as usize, bounds.height() as usize);
-            let buffer = self
-                .offscreen_buffer_pool
-                .take(&self.descriptors, copy_dimensions.clone());
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
-                copy_area: bounds,
-            })
-        } else {
-            None
-        };
+        let buffer_info = self.get_texture_buffer_info(texture, bounds);
let mut target = TextureTarget { let mut target = TextureTarget {
size: extent, size: extent,
@ -757,31 +799,14 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
self.uniform_buffers_storage.recall(); self.uniform_buffers_storage.recall();
self.color_buffers_storage.recall(); self.color_buffers_storage.recall();
-        match target.take_buffer() {
-            None => Some(Box::new(QueueSyncHandle::NotCopied {
-                handle,
-                copy_area: bounds,
-                descriptors: self.descriptors.clone(),
-                pool: self.offscreen_buffer_pool.clone(),
-            })),
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
-                ..
-            }) => Some(Box::new(QueueSyncHandle::AlreadyCopied {
-                index,
-                buffer,
-                copy_dimensions,
-                descriptors: self.descriptors.clone(),
-            })),
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Owned(..),
-                ..
-            }) => unreachable!("Buffer must be Borrowed as it was set to be Borrowed earlier"),
-        }
+        Some(self.make_queue_sync_handle(target, index, handle, bounds))
} }
fn is_filter_supported(&self, filter: &Filter) -> bool { fn is_filter_supported(&self, filter: &Filter) -> bool {
-        matches!(filter, Filter::BlurFilter(_) | Filter::ColorMatrixFilter(_))
+        matches!(
+            filter,
+            Filter::BlurFilter(_) | Filter::ColorMatrixFilter(_) | Filter::ShaderFilter(_)
+        )
} }
fn is_offscreen_supported(&self) -> bool { fn is_offscreen_supported(&self) -> bool {
@ -804,21 +829,8 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
dest_texture.texture.width(), dest_texture.texture.width(),
dest_texture.texture.height(), dest_texture.texture.height(),
); );
-        let buffer_info = if dest_texture.copy_count.get() >= TEXTURE_READS_BEFORE_PROMOTION {
-            let copy_dimensions = BufferDimensions::new(
-                dest_texture.texture.width() as usize,
-                dest_texture.texture.height() as usize,
-            );
-            let buffer = self
-                .offscreen_buffer_pool
-                .take(&self.descriptors, copy_dimensions.clone());
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
-                copy_area,
-            })
-        } else {
-            None
-        };
+        let buffer_info = self.get_texture_buffer_info(dest_texture, copy_area);
let mut target = TextureTarget { let mut target = TextureTarget {
size: wgpu::Extent3d { size: wgpu::Extent3d {
@ -840,6 +852,7 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
.create_command_encoder(&wgpu::CommandEncoderDescriptor { .create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: label.as_deref(), label: label.as_deref(),
}); });
let applied_filter = self.descriptors.filters.apply( let applied_filter = self.descriptors.filters.apply(
&self.descriptors, &self.descriptors,
&mut draw_encoder, &mut draw_encoder,
@ -881,27 +894,7 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
frame_output, frame_output,
); );
-        match target.take_buffer() {
-            None => Some(Box::new(QueueSyncHandle::NotCopied {
-                handle: destination,
-                copy_area,
-                descriptors: self.descriptors.clone(),
-                pool: self.offscreen_buffer_pool.clone(),
-            })),
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Borrowed(buffer, copy_dimensions),
-                ..
-            }) => Some(Box::new(QueueSyncHandle::AlreadyCopied {
-                index,
-                buffer,
-                copy_dimensions,
-                descriptors: self.descriptors.clone(),
-            })),
-            Some(TextureBufferInfo {
-                buffer: MaybeOwnedBuffer::Owned(..),
-                ..
-            }) => unreachable!("Buffer must be Borrowed as it was set to be Borrowed earlier"),
-        }
+        Some(self.make_queue_sync_handle(target, index, destination, copy_area))
} }
fn compile_pixelbender_shader( fn compile_pixelbender_shader(
@ -917,7 +910,67 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
arguments: &[PixelBenderShaderArgument], arguments: &[PixelBenderShaderArgument],
target_handle: BitmapHandle, target_handle: BitmapHandle,
) -> Result<Box<dyn SyncHandle>, BitmapError> { ) -> Result<Box<dyn SyncHandle>, BitmapError> {
-        self.run_pixelbender_shader_impl(shader, arguments, target_handle)
+        let target = as_texture(&target_handle);
let extent = wgpu::Extent3d {
width: target.texture.width(),
height: target.texture.height(),
depth_or_array_layers: 1,
};
let buffer_info = self.get_texture_buffer_info(
target,
PixelRegion::for_whole_size(target.texture.width(), target.texture.height()),
);
let mut texture_target = TextureTarget {
size: extent,
texture: target.texture.clone(),
format: wgpu::TextureFormat::Rgba8Unorm,
buffer: buffer_info,
};
let frame_output = texture_target
.get_next_texture()
.expect("TextureTargetFrame.get_next_texture is infallible");
let mut render_command_encoder =
self.descriptors
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: create_debug_label!("Render command encoder").as_deref(),
});
run_pixelbender_shader_impl(
&self.descriptors,
shader,
ShaderMode::ShaderJob,
arguments,
&target.texture,
&mut render_command_encoder,
Some(wgpu::RenderPassColorAttachment {
view: frame_output.view(),
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
store: true,
},
}),
// When running a standalone shader, we always process the entire image
&FilterSource::for_entire_texture(&target.texture),
)?;
let index = self
.descriptors
.queue
.submit(Some(render_command_encoder.finish()));
Ok(self.make_queue_sync_handle(
texture_target,
index,
target_handle,
PixelRegion::for_whole_size(extent.width, extent.height),
))
} }
fn create_empty_texture( fn create_empty_texture(

View File

@ -1,16 +1,19 @@
mod blur; mod blur;
mod color_matrix; mod color_matrix;
mod shader;
use crate::buffer_pool::TexturePool; use crate::buffer_pool::TexturePool;
use crate::descriptors::Descriptors; use crate::descriptors::Descriptors;
use crate::filters::blur::BlurFilter; use crate::filters::blur::BlurFilter;
use crate::filters::color_matrix::ColorMatrixFilter; use crate::filters::color_matrix::ColorMatrixFilter;
use crate::filters::shader::ShaderFilter;
use crate::surface::target::CommandTarget; use crate::surface::target::CommandTarget;
use bytemuck::{Pod, Zeroable}; use bytemuck::{Pod, Zeroable};
use ruffle_render::filters::Filter; use ruffle_render::filters::Filter;
use wgpu::util::DeviceExt; use wgpu::util::DeviceExt;
use wgpu::vertex_attr_array; use wgpu::vertex_attr_array;
#[derive(Debug)]
pub struct FilterSource<'a> { pub struct FilterSource<'a> {
pub texture: &'a wgpu::Texture, pub texture: &'a wgpu::Texture,
pub point: (u32, u32), pub point: (u32, u32),
@ -61,6 +64,7 @@ impl<'a> FilterSource<'a> {
pub struct Filters { pub struct Filters {
pub blur: BlurFilter, pub blur: BlurFilter,
pub color_matrix: ColorMatrixFilter, pub color_matrix: ColorMatrixFilter,
pub shader: ShaderFilter,
} }
impl Filters { impl Filters {
@ -68,6 +72,7 @@ impl Filters {
Self { Self {
blur: BlurFilter::new(device), blur: BlurFilter::new(device),
color_matrix: ColorMatrixFilter::new(device), color_matrix: ColorMatrixFilter::new(device),
shader: ShaderFilter::new(),
} }
} }
@ -94,6 +99,13 @@ impl Filters {
&source, &source,
&filter, &filter,
), ),
Filter::ShaderFilter(shader) => Some(descriptors.filters.shader.apply(
descriptors,
texture_pool,
draw_encoder,
&source,
shader,
)),
_ => { _ => {
tracing::warn!("Unsupported filter {filter:?}"); tracing::warn!("Unsupported filter {filter:?}");
None None

View File

@ -0,0 +1,72 @@
use ruffle_render::{
filters::ShaderFilter as ShaderFilterArgs,
pixel_bender::{ImageInputTexture, PixelBenderShaderArgument},
};
use crate::{
backend::RenderTargetMode,
buffer_pool::TexturePool,
descriptors::Descriptors,
pixel_bender::{run_pixelbender_shader_impl, ShaderMode},
surface::target::CommandTarget,
};
use super::FilterSource;
/// All of the data is stored in the `ShaderFilterArgs`
#[derive(Default)]
pub struct ShaderFilter;
impl ShaderFilter {
pub fn new() -> Self {
Self
}
#[allow(clippy::too_many_arguments)]
pub fn apply<'a>(
&self,
descriptors: &Descriptors,
texture_pool: &mut TexturePool,
draw_encoder: &mut wgpu::CommandEncoder,
source: &FilterSource<'a>,
mut filter: ShaderFilterArgs<'a>,
) -> CommandTarget {
let sample_count = source.texture.sample_count();
let format = source.texture.format();
let target = CommandTarget::new(
descriptors,
texture_pool,
wgpu::Extent3d {
width: source.size.0,
height: source.size.1,
depth_or_array_layers: 1,
},
format,
sample_count,
RenderTargetMode::FreshWithColor(wgpu::Color::TRANSPARENT),
draw_encoder,
);
for arg in &mut filter.shader_args {
if let PixelBenderShaderArgument::ImageInput { texture, .. } = arg {
*texture = Some(ImageInputTexture::TextureRef(source.texture));
// Only bind the first input from the source texture
break;
}
}
run_pixelbender_shader_impl(
descriptors,
filter.shader,
ShaderMode::Filter,
&filter.shader_args,
target.color_texture(),
draw_encoder,
target.color_attachments(),
source,
)
.expect("Failed to run pixelbender shader");
target
}
}

View File

@ -14,6 +14,7 @@ use crate::utils::{
use bytemuck::{Pod, Zeroable}; use bytemuck::{Pod, Zeroable};
use descriptors::Descriptors; use descriptors::Descriptors;
use enum_map::Enum; use enum_map::Enum;
use ruffle_render::backend::RawTexture;
use ruffle_render::bitmap::{BitmapHandle, BitmapHandleImpl, PixelRegion, RgbaBufRead, SyncHandle}; use ruffle_render::bitmap::{BitmapHandle, BitmapHandleImpl, PixelRegion, RgbaBufRead, SyncHandle};
use ruffle_render::shape_utils::GradientType; use ruffle_render::shape_utils::GradientType;
use ruffle_render::tessellator::{Gradient as TessGradient, Vertex as TessVertex}; use ruffle_render::tessellator::{Gradient as TessGradient, Vertex as TessVertex};
@ -54,6 +55,10 @@ pub fn as_texture(handle: &BitmapHandle) -> &Texture {
<dyn BitmapHandleImpl>::downcast_ref(&*handle.0).unwrap() <dyn BitmapHandleImpl>::downcast_ref(&*handle.0).unwrap()
} }
pub fn raw_texture_as_texture(handle: &dyn RawTexture) -> &wgpu::Texture {
<dyn RawTexture>::downcast_ref(handle).unwrap()
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Enum)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Enum)]
pub enum MaskState { pub enum MaskState {
NoMask, NoMask,

View File

@ -5,27 +5,25 @@ use std::{borrow::Cow, cell::Cell, sync::Arc};
use indexmap::IndexMap; use indexmap::IndexMap;
use ruffle_render::error::Error as BitmapError; use ruffle_render::error::Error as BitmapError;
use ruffle_render::pixel_bender::{ use ruffle_render::pixel_bender::{
PixelBenderShaderHandle, PixelBenderShaderImpl, PixelBenderType, OUT_COORD_NAME, ImageInputTexture, PixelBenderShaderHandle, PixelBenderShaderImpl, PixelBenderType,
OUT_COORD_NAME,
}; };
use ruffle_render::{ use ruffle_render::{
bitmap::{BitmapHandle, PixelRegion, SyncHandle}, bitmap::BitmapHandle,
pixel_bender::{PixelBenderParam, PixelBenderShader, PixelBenderShaderArgument}, pixel_bender::{PixelBenderParam, PixelBenderShader, PixelBenderShaderArgument},
}; };
use wgpu::util::StagingBelt; use wgpu::util::StagingBelt;
use wgpu::{ use wgpu::{
BindGroupEntry, BindingResource, BlendComponent, BufferDescriptor, BufferUsages, BindGroupEntry, BindingResource, BlendComponent, BufferDescriptor, BufferUsages,
ColorTargetState, ColorWrites, FrontFace, ImageCopyTexture, RenderPipelineDescriptor, ColorTargetState, ColorWrites, CommandEncoder, FrontFace, ImageCopyTexture,
SamplerBindingType, ShaderModuleDescriptor, TextureDescriptor, TextureFormat, TextureView, RenderPipelineDescriptor, SamplerBindingType, ShaderModuleDescriptor, TextureDescriptor,
VertexState, TextureFormat, TextureView, VertexState,
}; };
use crate::filters::{FilterSource, VERTEX_BUFFERS_DESCRIPTION_FILTERS};
use crate::raw_texture_as_texture;
use crate::{ use crate::{
as_texture, as_texture, backend::WgpuRenderBackend, descriptors::Descriptors, target::RenderTarget, Texture,
backend::WgpuRenderBackend,
descriptors::Descriptors,
pipelines::VERTEX_BUFFERS_DESCRIPTION_POS,
target::{RenderTarget, RenderTargetFrame, TextureTarget},
QueueSyncHandle, Texture,
}; };
#[derive(Debug)] #[derive(Debug)]
@ -37,6 +35,7 @@ pub struct PixelBenderWgpuShader {
float_parameters_buffer_size: u64, float_parameters_buffer_size: u64,
int_parameters_buffer: wgpu::Buffer, int_parameters_buffer: wgpu::Buffer,
int_parameters_buffer_size: u64, int_parameters_buffer_size: u64,
zeroed_out_of_range_mode: wgpu::Buffer,
staging_belt: RefCell<StagingBelt>, staging_belt: RefCell<StagingBelt>,
} }
@ -94,6 +93,16 @@ impl PixelBenderWgpuShader {
}, },
count: None, count: None,
}, },
wgpu::BindGroupLayoutEntry {
binding: naga_pixelbender::ZEROED_OUT_OF_RANGE_MODE_INDEX,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
]; ];
for param in &shader.params { for param in &shader.params {
@ -156,6 +165,14 @@ impl PixelBenderWgpuShader {
mapped_at_creation: false, mapped_at_creation: false,
}); });
let zeroed_out_of_range_mode = descriptors.device.create_buffer(&BufferDescriptor {
label: create_debug_label!("PixelBender zeroed_out_of_range_mode parameter buffer")
.as_deref(),
size: shaders.int_parameters_buffer_size,
usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let vertex_shader = descriptors let vertex_shader = descriptors
.device .device
.create_shader_module(ShaderModuleDescriptor { .create_shader_module(ShaderModuleDescriptor {
@ -177,12 +194,12 @@ impl PixelBenderWgpuShader {
layout: Some(&pipeline_layout), layout: Some(&pipeline_layout),
vertex: VertexState { vertex: VertexState {
module: &vertex_shader, module: &vertex_shader,
-                entry_point: naga_pixelbender::SHADER_ENTRYPOINT,
-                buffers: &VERTEX_BUFFERS_DESCRIPTION_POS,
+                entry_point: naga_pixelbender::VERTEX_SHADER_ENTRYPOINT,
+                buffers: &VERTEX_BUFFERS_DESCRIPTION_FILTERS,
             },
             fragment: Some(wgpu::FragmentState {
                 module: &fragment_shader,
-                entry_point: naga_pixelbender::SHADER_ENTRYPOINT,
+                entry_point: naga_pixelbender::FRAGMENT_SHADER_ENTRYPOINT,
targets: &[Some(ColorTargetState { targets: &[Some(ColorTargetState {
format: TextureFormat::Rgba8Unorm, format: TextureFormat::Rgba8Unorm,
// FIXME - what should this be? // FIXME - what should this be?
@ -215,12 +232,20 @@ impl PixelBenderWgpuShader {
float_parameters_buffer_size: shaders.float_parameters_buffer_size, float_parameters_buffer_size: shaders.float_parameters_buffer_size,
int_parameters_buffer, int_parameters_buffer,
int_parameters_buffer_size: shaders.int_parameters_buffer_size, int_parameters_buffer_size: shaders.int_parameters_buffer_size,
zeroed_out_of_range_mode,
// FIXME - come up with a good chunk size // FIXME - come up with a good chunk size
staging_belt: RefCell::new(StagingBelt::new(8)), staging_belt: RefCell::new(StagingBelt::new(8)),
} }
} }
} }
fn image_input_as_texture<'a>(input: &'a ImageInputTexture<'a>) -> &wgpu::Texture {
match input {
ImageInputTexture::Bitmap(handle) => &as_texture(handle).texture,
ImageInputTexture::TextureRef(raw_texture) => raw_texture_as_texture(*raw_texture),
}
}
impl<T: RenderTarget> WgpuRenderBackend<T> { impl<T: RenderTarget> WgpuRenderBackend<T> {
pub(super) fn compile_pixelbender_shader_impl( pub(super) fn compile_pixelbender_shader_impl(
&mut self, &mut self,
@ -229,262 +254,277 @@ impl<T: RenderTarget> WgpuRenderBackend<T> {
let handle = PixelBenderWgpuShader::new(&self.descriptors, shader); let handle = PixelBenderWgpuShader::new(&self.descriptors, shader);
Ok(PixelBenderShaderHandle(Arc::new(handle))) Ok(PixelBenderShaderHandle(Arc::new(handle)))
} }
}
pub(super) fn run_pixelbender_shader_impl( pub enum ShaderMode {
&mut self, ShaderJob,
shader: PixelBenderShaderHandle, Filter,
arguments: &[PixelBenderShaderArgument], }
target_handle: BitmapHandle,
) -> Result<Box<dyn SyncHandle>, BitmapError> {
let compiled_shader = &as_cache_holder(&shader);
let mut staging_belt = compiled_shader.staging_belt.borrow_mut();
let mut arguments = arguments.to_vec(); #[allow(clippy::too_many_arguments)]
pub(super) fn run_pixelbender_shader_impl(
descriptors: &Descriptors,
shader: PixelBenderShaderHandle,
mode: ShaderMode,
arguments: &[PixelBenderShaderArgument],
target: &wgpu::Texture,
render_command_encoder: &mut CommandEncoder,
color_attachment: Option<wgpu::RenderPassColorAttachment>,
// FIXME - do we cover the whole source or the whole dest?
source: &FilterSource,
) -> Result<(), BitmapError> {
let compiled_shader = &as_cache_holder(&shader);
let mut staging_belt = compiled_shader.staging_belt.borrow_mut();
let target = as_texture(&target_handle); let mut arguments = arguments.to_vec();
let extent = wgpu::Extent3d {
width: target.texture.width(),
height: target.texture.height(),
depth_or_array_layers: 1,
};
let mut texture_target = TextureTarget { let mut bind_group_entries = vec![
size: extent, BindGroupEntry {
texture: target.texture.clone(), binding: naga_pixelbender::SAMPLER_CLAMP_NEAREST,
format: wgpu::TextureFormat::Rgba8Unorm, resource: BindingResource::Sampler(&descriptors.bitmap_samplers.clamp_nearest),
buffer: None, },
}; BindGroupEntry {
binding: naga_pixelbender::SAMPLER_CLAMP_LINEAR,
resource: BindingResource::Sampler(&descriptors.bitmap_samplers.clamp_linear),
},
BindGroupEntry {
binding: naga_pixelbender::SAMPLER_CLAMP_BILINEAR,
// FIXME - create bilinear sampler
resource: BindingResource::Sampler(&descriptors.bitmap_samplers.clamp_linear),
},
BindGroupEntry {
binding: naga_pixelbender::SHADER_FLOAT_PARAMETERS_INDEX,
resource: BindingResource::Buffer(wgpu::BufferBinding {
buffer: &compiled_shader.float_parameters_buffer,
offset: 0,
size: Some(NonZeroU64::new(compiled_shader.float_parameters_buffer_size).unwrap()),
}),
},
BindGroupEntry {
binding: naga_pixelbender::SHADER_INT_PARAMETERS_INDEX,
resource: BindingResource::Buffer(wgpu::BufferBinding {
buffer: &compiled_shader.int_parameters_buffer,
offset: 0,
size: Some(NonZeroU64::new(compiled_shader.int_parameters_buffer_size).unwrap()),
}),
},
BindGroupEntry {
binding: naga_pixelbender::ZEROED_OUT_OF_RANGE_MODE_INDEX,
resource: BindingResource::Buffer(wgpu::BufferBinding {
                    buffer: &compiled_shader.zeroed_out_of_range_mode,
                    offset: 0,
                    size: Some(NonZeroU64::new(std::mem::size_of::<f32>() as u64).unwrap()),
                }),
            },
        ];

    let mut zeroed_out_of_range_mode_slice = staging_belt.write_buffer(
        render_command_encoder,
        &compiled_shader.zeroed_out_of_range_mode,
        0,
        NonZeroU64::new(std::mem::size_of::<f32>() as u64).unwrap(),
        &descriptors.device,
    );

    zeroed_out_of_range_mode_slice.copy_from_slice(bytemuck::cast_slice(&[match mode {
        // When a Shader is run via a ShaderJob, out-of-range texture sample coordinates
        // seem to be clamped to the edge of the texture (despite what the docs describe)
        ShaderMode::ShaderJob => 0.0f32,
        // When a Shader is run through a ShaderFilter, out-of-range texture sample coordinates
        // return transparent black (0.0, 0.0, 0.0, 0.0). This is easiest to observe with
        // BitmapData.applyFilter when the BitampData destination is larger than the source.
        ShaderMode::Filter => 1.0f32,
    }]));

    drop(zeroed_out_of_range_mode_slice);

    let mut texture_views: IndexMap<u8, TextureView> = Default::default();

    let mut target_clone = None;

    let mut float_offset = 0;
    let mut int_offset = 0;

    let mut first_image = None;

    for input in &mut arguments {
        match input {
            PixelBenderShaderArgument::ImageInput { index, texture, .. } => {
                let input_texture = &image_input_as_texture(texture.as_ref().unwrap());

                if std::ptr::eq(*input_texture, target) {
                    // The input is the same as the output - we need to clone the input.
                    // We will write to the original output, and use a clone of the input as a texture input binding
                    let cached_fresh_handle = target_clone.get_or_insert_with(|| {
                        let extent = wgpu::Extent3d {
                            width: target.width(),
                            height: target.height(),
                            depth_or_array_layers: 1,
                        };
                        let fresh_texture = descriptors.device.create_texture(&TextureDescriptor {
                            label: Some("PixelBenderShader target clone"),
                            size: extent,
                            mip_level_count: 1,
                            sample_count: 1,
                            dimension: wgpu::TextureDimension::D2,
                            format: wgpu::TextureFormat::Rgba8Unorm,
                            usage: wgpu::TextureUsages::COPY_DST
                                | wgpu::TextureUsages::TEXTURE_BINDING,
                            view_formats: &[wgpu::TextureFormat::Rgba8Unorm],
                        });
                        render_command_encoder.copy_texture_to_texture(
                            ImageCopyTexture {
                                texture: target,
                                mip_level: 0,
                                origin: Default::default(),
                                aspect: Default::default(),
                            },
                            ImageCopyTexture {
                                texture: &fresh_texture,
                                mip_level: 0,
                                origin: Default::default(),
                                aspect: Default::default(),
                            },
                            extent,
                        );
                        BitmapHandle(Arc::new(Texture {
                            texture: Arc::new(fresh_texture),
                            bind_linear: Default::default(),
                            bind_nearest: Default::default(),
                            copy_count: Cell::new(0),
                        }))
                    });
                    *texture = Some(cached_fresh_handle.clone().into());
                }

                let wgpu_texture = image_input_as_texture(texture.as_ref().unwrap());

                texture_views.insert(
                    *index,
                    wgpu_texture.create_view(&wgpu::TextureViewDescriptor::default()),
                );
            }
            PixelBenderShaderArgument::ValueInput { index, value } => {
                let param = &compiled_shader.shader.params[*index as usize];

                let name = match param {
                    PixelBenderParam::Normal { name, .. } => name,
                    _ => unreachable!(),
                };

                if name == OUT_COORD_NAME {
                    continue;
                }

                let (value_vec, is_float): (Vec<f32>, bool) = match value {
                    PixelBenderType::TFloat(f1) => (vec![*f1, 0.0, 0.0, 0.0], true),
                    PixelBenderType::TFloat2(f1, f2) => (vec![*f1, *f2, 0.0, 0.0], true),
                    PixelBenderType::TFloat3(f1, f2, f3) => (vec![*f1, *f2, *f3, 0.0], true),
                    PixelBenderType::TFloat4(f1, f2, f3, f4) => (vec![*f1, *f2, *f3, *f4], true),
                    PixelBenderType::TInt(i1) => (vec![*i1 as f32, 0.0, 0.0, 0.0], false),
                    PixelBenderType::TInt2(i1, i2) => {
                        (vec![*i1 as f32, *i2 as f32, 0.0, 0.0], false)
                    }
                    PixelBenderType::TInt3(i1, i2, i3) => {
                        (vec![*i1 as f32, *i2 as f32, *i3 as f32, 0.0], false)
                    }
                    PixelBenderType::TInt4(i1, i2, i3, i4) => {
                        (vec![*i1 as f32, *i2 as f32, *i3 as f32, *i4 as f32], false)
                    }
                    PixelBenderType::TFloat2x2(arr) => (arr.to_vec(), true),
                    PixelBenderType::TFloat3x3(arr) => {
                        // Add a zero after every 3 values to created zero-padded vec4s
                        let mut vec4_arr = Vec::with_capacity(16);
                        for (i, val) in arr.iter().enumerate() {
                            vec4_arr.push(*val);
                            if i % 3 == 2 {
                                vec4_arr.push(0.0);
                            }
                        }
                        (vec4_arr, true)
                    }
                    PixelBenderType::TFloat4x4(arr) => (arr.to_vec(), true),
                    _ => unreachable!("Unimplemented value {value:?}"),
                };
                assert_eq!(
                    value_vec.len() % 4,
                    0,
                    "value_vec should represent concatenated vec4fs"
                );
                let num_vec4s = value_vec.len() / 4;
                // Both float32 and int are 4 bytes
                let component_size_bytes = 4;
                let (buffer, vec4_count) = if is_float {
                    let res = (&compiled_shader.float_parameters_buffer, float_offset);
                    float_offset += num_vec4s;
                    res
                } else {
                    let res = (&compiled_shader.int_parameters_buffer, int_offset);
                    int_offset += num_vec4s;
                    res
                };

                let mut buffer_slice = staging_belt.write_buffer(
                    render_command_encoder,
                    buffer,
                    vec4_count as u64 * 4 * component_size_bytes,
                    NonZeroU64::new(value_vec.len() as u64 * component_size_bytes).unwrap(),
                    &descriptors.device,
                );
                buffer_slice.copy_from_slice(bytemuck::cast_slice(&value_vec));
            }
        }
    }

    // This needs to be a separate loop, so that we can get references into `texture_views`
    for input in &arguments {
        match input {
            PixelBenderShaderArgument::ImageInput { index, texture, .. } => {
                let wgpu_texture = image_input_as_texture(texture.as_ref().unwrap());

                if first_image.is_none() {
                    first_image = Some(wgpu_texture);
                }

                let binding = naga_pixelbender::TEXTURE_START_BIND_INDEX + *index as u32;
                bind_group_entries.push(BindGroupEntry {
                    binding,
                    resource: BindingResource::TextureView(&texture_views[index]),
                });
            }
            PixelBenderShaderArgument::ValueInput { .. } => {}
        }
    }

    let bind_group = descriptors
        .device
        .create_bind_group(&wgpu::BindGroupDescriptor {
            label: None,
            layout: &compiled_shader.bind_group_layout,
            entries: &bind_group_entries,
        });

    staging_belt.finish();

    let vertices = source.vertices(&descriptors.device);

    let mut render_pass = render_command_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
        label: Some("PixelBender render pass"),
        color_attachments: &[color_attachment],
        depth_stencil_attachment: None,
    });

    render_pass.set_bind_group(0, &bind_group, &[]);
    render_pass.set_pipeline(&compiled_shader.pipeline);

    render_pass.set_vertex_buffer(0, vertices.slice(..));
    render_pass.set_index_buffer(
        descriptors.quad.indices.slice(..),
        wgpu::IndexFormat::Uint32,
    );

    render_pass.draw_indexed(0..6, 0, 0..1);

    // Note - we just drop the staging belt, instead of recalling it,
    // since we're not going to use it again.
    Ok(())
}
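The TFloat3x3 arm above is the only case that needs re-packing before upload. A minimal standalone sketch of that padding scheme, assuming a row-major [f32; 9] input; the helper name pad_float3x3 and the main() driver are illustrative and not part of this commit:

// Illustrative sketch: expand a row-major 3x3 matrix into three zero-padded vec4 rows,
// matching the "concatenated vec4fs" layout asserted on above.
fn pad_float3x3(arr: &[f32; 9]) -> Vec<f32> {
    let mut vec4_arr = Vec::with_capacity(12);
    for (i, val) in arr.iter().enumerate() {
        vec4_arr.push(*val);
        if i % 3 == 2 {
            // Close out the current row with a padding zero.
            vec4_arr.push(0.0);
        }
    }
    vec4_arr
}

fn main() {
    // grayScale matrix values from the tintype test later in this commit.
    let padded = pad_float3x3(&[0.9, 0.6094, 0.082, 0.3086, 0.8, 0.082, 0.3086, 1.2, 0.7]);
    assert_eq!(padded.len(), 12); // three vec4s -> 48 bytes in the float parameters buffer
    assert_eq!(&padded[..4], &[0.9, 0.6094, 0.082, 0.0]);
}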

View File

@@ -132,8 +132,8 @@ fn composer() -> Result<Composer, ComposerError> {
         ..Default::default()
     })?;
     composer.add_composable_module(ComposableModuleDescriptor {
-        source: include_str!("../shaders/filter/common.wgsl"),
-        file_path: "filter/common.wgsl",
+        source: ruffle_render::shader_source::SHADER_FILTER_COMMON,
+        file_path: "shader_filter_common.wgsl",
         ..Default::default()
     })?;
     Ok(composer)
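The new source constant comes from ruffle_render; a hedged sketch of how such a shared constant might be defined there (the module name shader_source matches the path used above, but the exact definition and include path are assumptions, not shown in this diff):

// Assumed definition of the shared filter WGSL source, letting several backends
// register the same module text with naga_oil's Composer.
pub mod shader_source {
    pub const SHADER_FILTER_COMMON: &str = include_str!("shaders/filter/common.wgsl");
}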

View File

@ -0,0 +1,40 @@
package {
import flash.display.BitmapData;
import flash.display.ShaderJob;
import flash.display.Shader;
import flash.display.Bitmap;
import flash.display.MovieClip;
import flash.display.ShaderParameter;
public class Test {
[Embed(source = "YellowFlowers.png")]
public static var FLOWERS: Class;
[Embed(source = "mandelbrot.png")]
public static var MANDELBROT: Class;
// Shader from
[Embed(source = "glassDisplace.pbj", mimeType="application/octet-stream")]
public static var GLASSDISPLACE_BYTES: Class;
public function Test(main: MovieClip) {
main.stage.scaleMode = "noScale";
var flowers: Bitmap = new FLOWERS();
var mandelbrot: Bitmap = new MANDELBROT();
main.addChild(new Bitmap(glassDisplace(flowers.bitmapData.clone(), mandelbrot.bitmapData.clone())));
}
private function glassDisplace(input1: BitmapData, input2): BitmapData {
var out = new BitmapData(Math.max(input1.width, input2.width), Math.max(input1.height, input2.height), true, 0xFF00FF00);
var shader = new ShaderJob(new Shader(new GLASSDISPLACE_BYTES()), out);
shader.shader.data.center.value = [80, 420];
shader.shader.data.stretch.value = [180, 20];
shader.shader.data.alpha.value = [0.27];
shader.shader.data.src.input = input1;
shader.shader.data.src2.input = input2;
shader.start(true);
return out;
}
}
}

Binary file not shown. (Size: 262 KiB)

Binary file not shown. (Size: 412 KiB)

View File

@ -0,0 +1,43 @@
// Based on https://github.com/nojvek/pixelbender/blob/master/selfDisplace/glassDisplace.pbk
<languageVersion : 1.0;>
kernel NewFilter
< namespace : "com.om-labs.pixelbender.selfDisplace";
vendor : "Om Labs";
version : 1;
description : "depending on the color values of the image,displacement, centrepoint and stretch, create freaky images.";
>
{
parameter float2 center<
minValue:float2(-1000.0);
maxValue:float2(1000.0);
defaultValue:float2(500.0,400.0);
>;
parameter float2 stretch<
minValue: float2(-1000.0);
maxValue: float2(1000.0);
defaultValue: float2(0.0);
>;
parameter float alpha<
minValue:0.0;
maxValue:1.0;
defaultValue:0.0;
>;
input image4 src;
input image4 src2;
output float4 dst;
void evaluatePixel(){
float2 pos = outCoord();
float4 heightColor = sampleNearest(src,pos);
float height = (heightColor.r+heightColor.g+heightColor.b)/3.0;
float2 direction = normalize(pos-center);
pos += float2(-height*direction.x*stretch.x,-height*direction.y*stretch.y);
dst = mix(sampleNearest(src,pos),sampleNearest(src2,pos),alpha);
}
}
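For reference, a CPU-side sketch of the displacement math in evaluatePixel above, with the center and stretch values taken from the test; the function and the sampled height value are illustrative only:

// Illustrative sketch: compute the displaced sample position for one pixel, mirroring
// pos += float2(-height*direction.x*stretch.x, -height*direction.y*stretch.y).
fn displaced_pos(pos: [f32; 2], center: [f32; 2], stretch: [f32; 2], height: f32) -> [f32; 2] {
    let dir = [pos[0] - center[0], pos[1] - center[1]];
    let len = (dir[0] * dir[0] + dir[1] * dir[1]).sqrt();
    let dir = [dir[0] / len, dir[1] / len]; // normalize(pos - center)
    [
        pos[0] - height * dir[0] * stretch[0],
        pos[1] - height * dir[1] * stretch[1],
    ]
}

fn main() {
    // height is the average of the RGB channels of sampleNearest(src, pos); 0.5 is made up here.
    let pos = displaced_pos([100.0, 100.0], [80.0, 420.0], [180.0, 20.0], 0.5);
    // The kernel then mixes sampleNearest(src, pos) and sampleNearest(src2, pos) by alpha.
    println!("displaced sample position: {pos:?}");
}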

Binary file not shown. (Size: 115 KiB)

View File

@ -0,0 +1,9 @@
num_frames = 1
[image_comparison]
tolerance = 3
max_outliers = 1003
[player_options]
viewport_dimensions = { width = 600, height = 700, scale_factor = 1 }
with_renderer = { optional = false, sample_count = 1 }

View File

@ -0,0 +1,76 @@
package {
import flash.display.BitmapData;
import flash.display.ShaderJob;
import flash.display.Shader;
import flash.display.Bitmap;
import flash.display.MovieClip;
import flash.display.ShaderParameter;
import flash.geom.Rectangle;
import flash.geom.Point;
import flash.filters.ShaderFilter;
public class Test {
[Embed(source = "YellowFlowers.png")]
public static var FLOWERS: Class;
[Embed(source = "mandelbrot.png")]
public static var MANDELBROT: Class;
// Shader from
[Embed(source = "glassDisplace.pbj", mimeType="application/octet-stream")]
public static var GLASSDISPLACE_BYTES: Class;
public function Test(main: MovieClip) {
//main.stage.scaleMode = "noScale";
var flowers: Bitmap = new FLOWERS();
var mandelbrot: Bitmap = new MANDELBROT();
var shader = glassDisplace(mandelbrot.bitmapData);
var width = Math.max(flowers.width, mandelbrot.width);
var height = Math.max(flowers.height, mandelbrot.height);
trace("Flowers rect: " + flowers.bitmapData.rect);
var out1 = new Bitmap(flowers.bitmapData.clone());
var out2 = new Bitmap(new BitmapData(width, height, true, 0xFF0000FF));
//var out2 = new Bitmap(new BitmapData(flowers.bitmapData.width, flowers.bitmapData.height, true, 0xFF0000FF));
var filter = new ShaderFilter(shader);
out1.filters = [filter];
trace("ShaderFilter equal: " + (out1.filters[0] === filter));
trace("Shader equal: " + (out1.filters[0].shader === filter.shader));
trace("Dest rect: " + out2.bitmapData.generateFilterRect(new Rectangle(100, 10, 400, 20), filter));
out2.bitmapData.applyFilter(flowers.bitmapData, new Rectangle(0, 0, 20, 20), new Point(0, 0), filter);
out2.y = 390;
main.addChild(out1);
main.addChild(out2);
}
private function glassDisplace(input2: BitmapData): Shader {
// This should be unused, since it's bound to the first image input
// (which gets overwritten when applying ShaderFilter)
var fake = new BitmapData(300, 100, true, 0xFFFF0000);
var shader = new Shader(new GLASSDISPLACE_BYTES());
shader.data.center.value = [80, 420];
shader.data.stretch.value = [180, 20];
// Uncomment the following lines to simplify the shader output
// to make comparisons between Ruffle and Flash easier.
//shader.data.center.value =[0, 0];
//shader.data.stretch.value = [0, 0];
shader.data.alpha.value = [1.0];
shader.data.src.input = fake;
shader.data.src2.input = input2;
return shader;
}
}
}

Binary file not shown. (Size: 262 KiB)

Binary file not shown. (Size: 299 KiB)

View File

@ -0,0 +1,43 @@
// Based on https://github.com/nojvek/pixelbender/blob/master/selfDisplace/glassDisplace.pbk
<languageVersion : 1.0;>
kernel NewFilter
< namespace : "com.om-labs.pixelbender.selfDisplace";
vendor : "Om Labs";
version : 1;
description : "depending on the color values of the image,displacement, centrepoint and stretch, create freaky images.";
>
{
parameter float2 center<
minValue:float2(-1000.0);
maxValue:float2(1000.0);
defaultValue:float2(500.0,400.0);
>;
parameter float2 stretch<
minValue: float2(-1000.0);
maxValue: float2(1000.0);
defaultValue: float2(0.0);
>;
parameter float alpha<
minValue:0.0;
maxValue:1.0;
defaultValue:0.0;
>;
input image4 src;
input image4 src2;
output float4 dst;
void evaluatePixel(){
float2 pos = outCoord();
float4 heightColor = sampleNearest(src,pos);
float height = (heightColor.r+heightColor.g+heightColor.b)/3.0;
float2 direction = normalize(pos-center);
pos += float2(-height*direction.x*stretch.x,-height*direction.y*stretch.y);
dst = mix(sampleNearest(src,pos),sampleNearest(src2,pos),alpha);
}
}

Binary file not shown. (Size: 115 KiB)

View File

@ -0,0 +1,4 @@
Flowers rect: (x=0, y=0, w=500, h=375)
ShaderFilter equal: false
Shader equal: true
Dest rect: (x=0, y=0, w=512, h=512)

View File

@ -0,0 +1,8 @@
num_frames = 10
[image_comparison]
tolerance = 3
max_outliers = 380
[player_options]
with_renderer = { optional = false, sample_count = 1 }

View File

@ -0,0 +1,35 @@
package {
import flash.display.BitmapData;
import flash.display.ShaderJob;
import flash.display.Shader;
import flash.display.Bitmap;
import flash.display.MovieClip;
public class Test {
[Embed(source = "YellowFlowers.png")]
public static var FLOWERS: Class;
// Shader from
[Embed(source = "smudge.pbj", mimeType="application/octet-stream")]
public static var SMUDGE_BYTES: Class;
public function Test(main: MovieClip) {
var flowers: Bitmap = new FLOWERS();
main.addChild(new Bitmap(smudge(flowers.bitmapData.clone())));
}
private function smudge(input: BitmapData): BitmapData {
var shader = new ShaderJob(new Shader(new SMUDGE_BYTES()), input);
shader.shader.data.bBox.value = [210, 200, 0, 260];
shader.shader.data.exponent.value = [-7.2];
shader.shader.data.factor.value = [-6.4];
shader.shader.data.center.value = [-1.12, 0.5];
shader.shader.data.size.value = [1.02];
shader.shader.data.smudge.value = [0.38];
shader.shader.data.src.input = input;
shader.start(true);
return input;
}
}
}

Binary file not shown. (Size: 262 KiB)

Binary file not shown. (Size: 262 KiB)

Binary file not shown.

View File

@ -0,0 +1,73 @@
// Based on https://github.com/hoojaoh/PhotoFilterShaders/blob/master/Smudge.pbk
<languageVersion : 1.0;>
kernel Smudge
< namespace : "Smudge";
vendor : "Paperless Post";
version : 1;
>
{
input image4 src;
output pixel4 dst;
//left, right, top, bottom
parameter float4 bBox
<
minValue:float4(0.0,0.0,0.0,0.0);
maxValue:float4(1000.0,1000.0,1000.0,1000.0);
defaultValue:float4(0.0,600.0,0.0,400.0);
>;
parameter float exponent
<
minValue:float(-10.0);
maxValue:float(10.0);
defaultValue:float(0.0);
>;
parameter float factor
<
minValue:float(-10.0);
maxValue:float(10.0);
defaultValue:float(0.0);
>;
parameter float2 center
<
minValue:float2(-2.0,-2.0);
maxValue:float2(2.0,2.0);
defaultValue:float2(0.5,0.5);
>;
//controls size of the gradient
parameter float size
<
minValue:float(0);
maxValue:float(2);
defaultValue:float(1);
>;
//smudge factor
parameter float smudge
<
minValue:float(0.0);
maxValue:float(1.0);
defaultValue:float(0.0);
>;
void
evaluatePixel()
{
dst = sampleNearest(src, outCoord());
float alpha = dst.a;
float2 centerPos = float2(mix(bBox[0],bBox[1],center[0]),mix(bBox[2],bBox[3],center[1]));
float dist = distance(centerPos,outCoord());
dist = dist * (1.0/size);
dist = dist/distance(centerPos,float2(bBox[0],bBox[3])); // normalize the distance between 0 and 1
dist = 1.0 - factor * pow(dist,exponent); // darken colors in relation to distance from center
dist *= smudge;
dst *= 1.0 + dist;
dst.a = alpha;
}
}
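A CPU-side sketch of the gain that evaluatePixel above multiplies into the sampled color (the original alpha is preserved separately); the function name and the two distance arguments are illustrative:

// Illustrative sketch of the Smudge brightness factor applied to dst.
fn smudge_gain(
    dist_to_center: f32, // distance(centerPos, outCoord())
    max_dist: f32,       // distance(centerPos, float2(bBox[0], bBox[3]))
    size: f32,
    exponent: f32,
    factor: f32,
    smudge: f32,
) -> f32 {
    let mut dist = dist_to_center * (1.0 / size);
    dist /= max_dist; // normalize the distance between 0 and 1
    dist = 1.0 - factor * dist.powf(exponent); // adjust brightness relative to distance from center
    dist *= smudge;
    1.0 + dist
}

fn main() {
    // Parameter values from the smudge test: size = 1.02, exponent = -7.2, factor = -6.4, smudge = 0.38;
    // the two distances (120.0 and 300.0) are made up.
    let gain = smudge_gain(120.0, 300.0, 1.02, -7.2, -6.4, 0.38);
    println!("color gain at this pixel: {gain}");
}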

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,7 @@
num_frames = 1
[image_comparison]
tolerance = 1
[player_options]
with_renderer = { optional = false, sample_count = 1 }

View File

@ -0,0 +1,35 @@
package {
import flash.display.BitmapData;
import flash.display.ShaderJob;
import flash.display.Shader;
import flash.display.Bitmap;
import flash.display.MovieClip;
public class Test {
[Embed(source = "YellowFlowers.png")]
public static var FLOWERS: Class;
// Shader from
[Embed(source = "tintype.pbj", mimeType="application/octet-stream")]
public static var TINTYPE_BYTES: Class;
public function Test(main: MovieClip) {
var flowers: Bitmap = new FLOWERS();
main.addChild(new Bitmap(tintype(flowers.bitmapData.clone())));
}
private function tintype(input: BitmapData): BitmapData {
var shader = new ShaderJob(new Shader(new TINTYPE_BYTES()), input);
shader.shader.data.grayScale.value = [
0.9, 0.6094, 0.082,
0.3086, 0.8, 0.082,
0.3086, 1.2, 0.7];
shader.shader.data.contrast.value = [1.83];
shader.shader.data.mid.value = [1];
shader.shader.data.src.input = input;
shader.start(true);
return input;
}
}
}

Binary file not shown. (Size: 262 KiB)

Binary file not shown. (Size: 241 KiB)

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,7 @@
num_frames = 1
[image_comparison]
tolerance = 1
[player_options]
with_renderer = { optional = false, sample_count = 1 }

Binary file not shown.

View File

@ -0,0 +1,45 @@
// Based on https://github.com/hoojaoh/PhotoFilterShaders/blob/master/TinType.pbk
<languageVersion : 1.0;>
kernel TinType
< namespace : "TinType";
vendor : "Paperless Post";
version : 1;
>
{
input image4 src;
output pixel4 dst;
parameter float3x3 grayScale
<
defaultValue:float3x3(.3086,.6094,.0820,
.3086,.6094,.0820,
.3086,.6094,.0820);
>;
parameter float contrast
<
minValue:float(0.0);
maxValue:float(3.0);
defaultValue:float(1.0);
>;
parameter float mid
<
minValue:float(0.0);
maxValue:float(1.0);
defaultValue:float(0.5);
>;
void
evaluatePixel()
{
float2 pos = outCoord();
dst = sampleNearest(src,pos);
dst.rgb = dst.rgb * grayScale;
dst.r = ((dst.r - mid) * contrast) + mid;
dst.g = ((dst.g - mid) * contrast) + mid;
dst.b = ((dst.b - mid) * contrast) + mid;
}
}