render: Support PixelBender ByteArray/Vector.<Number> input/output

When ActionScript uses a ByteArray/Vector.<Number> as a shader input
or target, we create a temporary Rgba32Float texture and copy the
float32 data to/from that texture.
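
To make the byte layout concrete, here is a small standalone sketch
(not Ruffle code; the helper name and the example dimensions are
invented) of how a Vector.<Number> input is flattened into the float32
bytes that get uploaded to the temporary texture:

```rust
// Flatten a Vector.<Number>-style input (f64 values) into little-endian
// float32 bytes, matching the width * height * channels * 4 byte layout
// that the shader-job code expects.
fn vector_to_f32_bytes(values: &[f64], width: u32, height: u32, channels: u32) -> Vec<u8> {
    let expected_len = (width * height * channels) as usize;
    assert_eq!(values.len(), expected_len);
    values
        .iter()
        .flat_map(|v| (*v as f32).to_le_bytes())
        .collect()
}

fn main() {
    // A 2x1 image with 3 channels: 6 numbers become 24 bytes.
    let bytes = vector_to_f32_bytes(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], 2, 1, 3);
    assert_eq!(bytes.len(), 2 * 1 * 3 * std::mem::size_of::<f32>());
}
```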

Unfortunately, wgpu doesn't seem to support an Rgb32Float (3-channel)
texture. When the shader uses 3 channels, we use an Rgba32Float
(4-channel) texture and manually insert/remove padding for the
alpha channel. This isn't very efficient, but it's the simplest
solution.
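
As a rough illustration of that workaround (again a standalone sketch
rather than the wgpu backend code; the helper names are invented), the
3-channel case amounts to appending a dummy alpha value per pixel
before upload and stripping it again after readback:

```rust
// Pad 3-channel float32 pixels to 4 channels for an Rgba32Float texture,
// and strip the padding again after reading the texture back.
const F32: usize = std::mem::size_of::<f32>();

fn pad_rgb_to_rgba(bytes: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(bytes.len() / 3 * 4);
    for pixel in bytes.chunks_exact(3 * F32) {
        out.extend_from_slice(pixel); // R, G, B as f32 bytes
        out.extend_from_slice(&[0; F32]); // padding "alpha" (value is ignored by the shader)
    }
    out
}

fn strip_rgba_to_rgb(bytes: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(bytes.len() / 4 * 3);
    for pixel in bytes.chunks_exact(4 * F32) {
        out.extend_from_slice(&pixel[..3 * F32]); // drop the alpha padding
    }
    out
}

fn main() {
    let rgb: Vec<u8> = [1.0f32, 2.0, 3.0]
        .iter()
        .flat_map(|v| v.to_le_bytes())
        .collect();
    let rgba = pad_rgb_to_rgba(&rgb);
    assert_eq!(rgba.len(), 4 * F32);
    assert_eq!(strip_rgba_to_rgb(&rgba), rgb);
}
```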

The temporary textures themselves aren't cached anywhere - if this
becomes a performance issue, we could look into using some of our
existing wgpu texture/buffer pooling code.
Aaron Hill 2023-12-02 15:26:58 -05:00
parent a916f6e7a2
commit 1cb24b41b0
18 changed files with 530 additions and 124 deletions

View File

@@ -14,6 +14,10 @@ package flash.display {
             return _height;
         }
 
+        public function set height(value:int):void {
+            _height = value;
+        }
+
         public function get index():int {
             return _index;
         }
@@ -22,6 +26,10 @@ package flash.display {
             return _width;
         }
 
+        public function set width(value:int):void {
+            _width = value;
+        }
+
         public function get input():Object {
             return _object;
         }

View File

@@ -9,10 +9,14 @@ package flash.display {
         private var _shader:Shader;
         private var _target:Object;
+        private var _width:int;
+        private var _height:int;
 
         public function ShaderJob(shader:Shader = null, target:Object = null, width:int = 0, height:int = 0) {
             this._shader = shader;
             this._target = target;
+            this._width = width;
+            this._height = height;
             stub_constructor("flash.display.ShaderJob");
         }
@@ -23,12 +27,19 @@ package flash.display {
         public native function start(waitForCompletion:Boolean = false):void;
 
         public function get height():int {
-            stub_getter("flash.display.ShaderJob", "height");
-            return 0;
+            return this._height;
         }
 
         public function set height(value:int):void {
-            stub_setter("flash.display.ShaderJob", "height");
+            this._height = value;
+        }
+
+        public function get width():int {
+            return this._width;
+        }
+
+        public function set width(value:int):void {
+            this._width = value;
         }
 
         public function get progress():Number {

View File

@@ -1,13 +1,17 @@
 use ruffle_render::{
+    backend::{PixelBenderOutput, PixelBenderTarget},
     bitmap::PixelRegion,
     pixel_bender::{
-        PixelBenderParam, PixelBenderParamQualifier, PixelBenderShaderArgument,
+        ImageInputTexture, PixelBenderParam, PixelBenderParamQualifier, PixelBenderShaderArgument,
         PixelBenderShaderHandle, PixelBenderType, OUT_COORD_NAME,
     },
 };
 
 use crate::{
-    avm2::{string::AvmString, Activation, Error, Object, TObject, Value},
+    avm2::{
+        bytearray::Endian, parameters::ParametersExt, string::AvmString, Activation, Error, Object,
+        TObject, Value,
+    },
     avm2_stub_method,
     pixel_bender::PixelBenderTypeExt,
 };
@@ -100,6 +104,25 @@ pub fn get_shader_args<'gc>(
                     .get_public_property("input", activation)
                     .expect("Missing input property");
 
+                let width = shader_input
+                    .get_public_property("width", activation)
+                    .unwrap()
+                    .as_u32(activation.context.gc_context)
+                    .unwrap();
+
+                let height = shader_input
+                    .get_public_property("height", activation)
+                    .unwrap()
+                    .as_u32(activation.context.gc_context)
+                    .unwrap();
+
+                let input_channels = shader_input
+                    .get_public_property("channels", activation)
+                    .unwrap()
+                    .as_u32(activation.context.gc_context)
+                    .unwrap();
+
+                assert_eq!(*channels as u32, input_channels);
                 let texture = if let Value::Null = input {
                     None
                 } else {
@@ -107,21 +130,49 @@
                         .as_object()
                         .expect("ShaderInput.input is not an object");
 
-                    let bitmap = input.as_bitmap_data().expect(
-                        "ShaderInput.input is not a BitmapData (FIXME - support other types)",
-                    );
-
-                    Some(bitmap.bitmap_handle(
-                        activation.context.gc_context,
-                        activation.context.renderer,
-                    ))
+                    let input_texture = if let Some(bitmap) = input.as_bitmap_data() {
+                        ImageInputTexture::Bitmap(bitmap.bitmap_handle(
+                            activation.context.gc_context,
+                            activation.context.renderer,
+                        ))
+                    } else if let Some(byte_array) = input.as_bytearray() {
+                        let expected_len = (width * height * input_channels) as usize
+                            * std::mem::size_of::<f32>();
+                        assert_eq!(byte_array.len(), expected_len);
+                        assert_eq!(byte_array.endian(), Endian::Little);
+                        ImageInputTexture::Bytes {
+                            width,
+                            height,
+                            channels: input_channels,
+                            bytes: byte_array.read_at(0, byte_array.len()).unwrap().to_vec(),
+                        }
+                    } else if let Some(vector) = input.as_vector_storage() {
+                        let expected_len = (width * height * input_channels) as usize;
+                        assert_eq!(vector.length(), expected_len);
+                        ImageInputTexture::Bytes {
+                            width,
+                            height,
+                            channels: input_channels,
+                            bytes: vector
+                                .iter()
+                                .flat_map(|val| {
+                                    (val.as_number(activation.context.gc_context).unwrap()
+                                        as f32)
+                                        .to_le_bytes()
+                                })
+                                .collect(),
+                        }
+                    } else {
+                        panic!("Unexpected input object {input:?}");
+                    };
+                    Some(input_texture)
                 };
 
                 Some(PixelBenderShaderArgument::ImageInput {
                     index: *index,
                     channels: *channels,
                     name: name.clone(),
-                    texture: texture.map(|t| t.into()),
+                    texture,
                 })
             }
         }
@@ -134,15 +185,17 @@
 pub fn start<'gc>(
     activation: &mut Activation<'_, 'gc>,
     this: Object<'gc>,
-    _args: &[Value<'gc>],
+    args: &[Value<'gc>],
 ) -> Result<Value<'gc>, Error<'gc>> {
-    avm2_stub_method!(
-        activation,
-        "flash.display.ShaderJob",
-        "start",
-        "async execution and non-BitmapData inputs"
-    );
+    let wait_for_completion = args.get_bool(0);
+    if !wait_for_completion {
+        avm2_stub_method!(
+            activation,
+            "flash.display.ShaderJob",
+            "start",
+            "with waitForCompletion=false"
+        );
+    }
     let shader = this
         .get_public_property("shader", activation)?
         .as_object()
@@ -155,26 +208,45 @@ pub fn start<'gc>(
         .as_object()
         .expect("ShaderJob.target is not an object");
 
-    let target_bitmap = target
-        .as_bitmap_data()
-        .expect("ShaderJob.target is not a BitmapData (FIXME - support other types)")
-        .sync();
-
-    // Perform both a GPU->CPU and CPU->GPU sync before writing to it.
-    // FIXME - are both necessary?
-    let mut target_bitmap_data = target_bitmap.write(activation.context.gc_context);
-    target_bitmap_data.update_dirty_texture(activation.context.renderer);
-
-    let target_handle = target_bitmap_data
-        .bitmap_handle(activation.context.renderer)
-        .expect("Missing handle");
-
-    let sync_handle = activation
+    let output_width = this
+        .get_public_property("width", activation)?
+        .as_u32(activation.context.gc_context)
+        .expect("ShaderJob.width is not a number");
+
+    let output_height = this
+        .get_public_property("height", activation)?
+        .as_u32(activation.context.gc_context)
+        .expect("ShaderJob.height is not a number");
+
+    let pixel_bender_target = if let Some(bitmap) = target.as_bitmap_data() {
+        let target_bitmap = bitmap.sync();
+        // Perform both a GPU->CPU and CPU->GPU sync before writing to it.
+        // FIXME - are both necessary?
+        let mut target_bitmap_data = target_bitmap.write(activation.context.gc_context);
+        target_bitmap_data.update_dirty_texture(activation.context.renderer);
+        PixelBenderTarget::Bitmap(
+            target_bitmap_data
+                .bitmap_handle(activation.context.renderer)
+                .expect("Missing handle"),
+        )
+    } else {
+        PixelBenderTarget::Bytes {
+            width: output_width,
+            height: output_height,
+        }
+    };
+
+    let output = activation
         .context
         .renderer
-        .run_pixelbender_shader(shader_handle, &arguments, target_handle)
+        .run_pixelbender_shader(shader_handle, &arguments, &pixel_bender_target)
         .expect("Failed to run shader");
 
-    let width = target_bitmap_data.width();
-    let height = target_bitmap_data.height();
-    target_bitmap_data.set_gpu_dirty(
+    match output {
+        PixelBenderOutput::Bitmap(sync_handle) => {
+            let target_bitmap = target.as_bitmap_data().unwrap().sync();
+            let mut target_bitmap_data = target_bitmap.write(activation.context.gc_context);
+            let width = target_bitmap_data.width();
+            let height = target_bitmap_data.height();
+            target_bitmap_data.set_gpu_dirty(
@@ -182,6 +254,23 @@ pub fn start<'gc>(
                 sync_handle,
                 PixelRegion::for_whole_size(width, height),
             );
+        }
+        PixelBenderOutput::Bytes(pixels) => {
+            if let Some(mut bytearray) = target.as_bytearray_mut(activation.context.gc_context) {
+                bytearray.write_at(&pixels, 0).unwrap();
+            } else if let Some(mut vector) =
+                target.as_vector_storage_mut(activation.context.gc_context)
+            {
+                let new_storage: Vec<_> = bytemuck::cast_slice::<u8, f32>(&pixels)
+                    .iter()
+                    .map(|p| Value::from(*p as f64))
+                    .collect();
+                vector.replace_storage(new_storage);
+            } else {
+                panic!("Unexpected target object {target:?}");
+            }
+        }
+    }
 
     Ok(Value::Undefined)
 }

View File

@@ -3,8 +3,8 @@
 #![allow(clippy::arc_with_non_send_sync)]
 
 use ruffle_render::backend::{
-    BitmapCacheEntry, Context3D, Context3DProfile, RenderBackend, ShapeHandle, ShapeHandleImpl,
-    ViewportDimensions,
+    BitmapCacheEntry, Context3D, Context3DProfile, PixelBenderOutput, PixelBenderTarget,
+    RenderBackend, ShapeHandle, ShapeHandleImpl, ViewportDimensions,
 };
 use ruffle_render::bitmap::{
     Bitmap, BitmapHandle, BitmapHandleImpl, BitmapSource, PixelRegion, PixelSnapping, SyncHandle,
@@ -523,8 +523,8 @@ impl RenderBackend for WebCanvasRenderBackend {
         &mut self,
         _handle: ruffle_render::pixel_bender::PixelBenderShaderHandle,
         _arguments: &[ruffle_render::pixel_bender::PixelBenderShaderArgument],
-        _target: BitmapHandle,
-    ) -> Result<Box<dyn SyncHandle>, Error> {
+        _target: &PixelBenderTarget,
+    ) -> Result<PixelBenderOutput, Error> {
         Err(Error::Unimplemented("run_pixelbender_shader".into()))
     }

View File

@@ -104,11 +104,26 @@ pub trait RenderBackend: Downcast {
         &mut self,
         handle: PixelBenderShaderHandle,
         arguments: &[PixelBenderShaderArgument],
-        target: BitmapHandle,
-    ) -> Result<Box<dyn SyncHandle>, Error>;
+        target: &PixelBenderTarget,
+    ) -> Result<PixelBenderOutput, Error>;
 }
 impl_downcast!(RenderBackend);
 
+pub enum PixelBenderTarget {
+    // The shader will write to the provided bitmap texture,
+    // producing a `PixelBenderOutput::Bitmap` with the corresponding
+    // `SyncHandle`
+    Bitmap(BitmapHandle),
+    // The shader will write to a temporary texture, which will then
+    // be immediately read back as bytes (in `PixelBenderOutput::Bytes`)
+    Bytes { width: u32, height: u32 },
+}
+
+pub enum PixelBenderOutput {
+    Bitmap(Box<dyn SyncHandle>),
+    Bytes(Vec<u8>),
+}
+
 pub trait IndexBuffer: Downcast {}
 impl_downcast!(IndexBuffer);
 
 pub trait VertexBuffer: Downcast {}

View File

@@ -14,7 +14,7 @@ use crate::quality::StageQuality;
 use crate::shape_utils::DistilledShape;
 use swf::Color;
 
-use super::{Context3D, Context3DProfile};
+use super::{Context3D, Context3DProfile, PixelBenderOutput, PixelBenderTarget};
 
 pub struct NullBitmapSource;
@@ -115,8 +115,8 @@ impl RenderBackend for NullRenderer {
         &mut self,
         _shader: PixelBenderShaderHandle,
         _arguments: &[PixelBenderShaderArgument],
-        _target: BitmapHandle,
-    ) -> Result<Box<dyn SyncHandle>, Error> {
+        _target: &PixelBenderTarget,
+    ) -> Result<PixelBenderOutput, Error> {
         Err(Error::Unimplemented("Pixel bender shader".into()))
     }

View File

@@ -280,6 +280,12 @@ pub enum PixelBenderShaderArgument<'a> {
 pub enum ImageInputTexture<'a> {
     Bitmap(BitmapHandle),
     TextureRef(&'a dyn RawTexture),
+    Bytes {
+        width: u32,
+        height: u32,
+        channels: u32,
+        bytes: Vec<u8>,
+    },
 }
 
 impl PartialEq for ImageInputTexture<'_> {

View File

@@ -4,8 +4,8 @@
 use bytemuck::{Pod, Zeroable};
 use ruffle_render::backend::{
-    BitmapCacheEntry, Context3D, Context3DProfile, RenderBackend, ShapeHandle, ShapeHandleImpl,
-    ViewportDimensions,
+    BitmapCacheEntry, Context3D, Context3DProfile, PixelBenderOutput, PixelBenderTarget,
+    RenderBackend, ShapeHandle, ShapeHandleImpl, ViewportDimensions,
 };
 use ruffle_render::bitmap::{
     Bitmap, BitmapFormat, BitmapHandle, BitmapHandleImpl, BitmapSource, PixelRegion, PixelSnapping,
@@ -1139,8 +1139,8 @@ impl RenderBackend for WebGlRenderBackend {
         &mut self,
         _handle: ruffle_render::pixel_bender::PixelBenderShaderHandle,
         _arguments: &[ruffle_render::pixel_bender::PixelBenderShaderArgument],
-        _target: BitmapHandle,
-    ) -> Result<Box<dyn SyncHandle>, BitmapError> {
+        _target: &PixelBenderTarget,
+    ) -> Result<PixelBenderOutput, BitmapError> {
         Err(BitmapError::Unimplemented("run_pixelbender_shader".into()))
     }

View File

@@ -14,7 +14,9 @@ use crate::{
     QueueSyncHandle, RenderTarget, SwapChainTarget, Texture, Transforms,
 };
 use image::imageops::FilterType;
-use ruffle_render::backend::{BitmapCacheEntry, Context3D, Context3DProfile};
+use ruffle_render::backend::{
+    BitmapCacheEntry, Context3D, Context3DProfile, PixelBenderOutput, PixelBenderTarget,
+};
 use ruffle_render::backend::{RenderBackend, ShapeHandle, ViewportDimensions};
 use ruffle_render::bitmap::{
     Bitmap, BitmapFormat, BitmapHandle, BitmapSource, PixelRegion, SyncHandle,
@@ -23,7 +25,8 @@ use ruffle_render::commands::CommandList;
 use ruffle_render::error::Error as BitmapError;
 use ruffle_render::filters::Filter;
 use ruffle_render::pixel_bender::{
-    PixelBenderShader, PixelBenderShaderArgument, PixelBenderShaderHandle,
+    PixelBenderParam, PixelBenderParamQualifier, PixelBenderShader, PixelBenderShaderArgument,
+    PixelBenderShaderHandle,
 };
 use ruffle_render::quality::StageQuality;
 use ruffle_render::shape_utils::DistilledShape;
@@ -374,6 +377,7 @@ impl<T: RenderTarget> WgpuRenderBackend<T> {
         let copy_dimensions = BufferDimensions::new(
             texture.texture.width() as usize,
             texture.texture.height() as usize,
+            texture.texture.format(),
         );
         let buffer = self
             .offscreen_buffer_pool
@@ -948,25 +952,87 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
         &mut self,
         shader: PixelBenderShaderHandle,
         arguments: &[PixelBenderShaderArgument],
-        target_handle: BitmapHandle,
-    ) -> Result<Box<dyn SyncHandle>, BitmapError> {
-        let target = as_texture(&target_handle);
+        target: &PixelBenderTarget,
+    ) -> Result<PixelBenderOutput, BitmapError> {
+        let mut output_channels = None;
+        for param in &shader.0.parsed_shader().params {
+            if let PixelBenderParam::Normal {
+                qualifier: PixelBenderParamQualifier::Output,
+                reg,
+                ..
+            } = param
+            {
+                if output_channels.is_some() {
+                    panic!("Multiple output parameters");
+                }
+                output_channels = Some(reg.channels.len());
+                break;
+            }
+        }
+        let output_channels = output_channels.expect("No output parameter");
+        let has_padding = output_channels == 3;
+        let texture_format =
+            crate::pixel_bender::temporary_texture_format_for_channels(output_channels as u32);
+
+        let target_handle = match target {
+            PixelBenderTarget::Bitmap(handle) => handle.clone(),
+            PixelBenderTarget::Bytes { width, height } => {
+                let extent = wgpu::Extent3d {
+                    width: *width,
+                    height: *height,
+                    depth_or_array_layers: 1,
+                };
+                // FIXME - cache this texture somehow. We might also want to consider using
+                // a compute shader
+                let texture_label = create_debug_label!("Temporary pixelbender output texture");
+                let texture = self
+                    .descriptors
+                    .device
+                    .create_texture(&wgpu::TextureDescriptor {
+                        label: texture_label.as_deref(),
+                        size: extent,
+                        mip_level_count: 1,
+                        sample_count: 1,
+                        dimension: wgpu::TextureDimension::D2,
+                        format: texture_format,
+                        view_formats: &[texture_format],
+                        usage: wgpu::TextureUsages::TEXTURE_BINDING
+                            | wgpu::TextureUsages::COPY_DST
+                            | wgpu::TextureUsages::RENDER_ATTACHMENT
+                            | wgpu::TextureUsages::COPY_SRC,
+                    });
+                BitmapHandle(Arc::new(Texture {
+                    texture: Arc::new(texture),
+                    bind_linear: Default::default(),
+                    bind_nearest: Default::default(),
+                    copy_count: Cell::new(0),
+                }))
+            }
+        };
+
+        let target_texture = as_texture(&target_handle);
 
         let extent = wgpu::Extent3d {
-            width: target.texture.width(),
-            height: target.texture.height(),
+            width: target_texture.texture.width(),
+            height: target_texture.texture.height(),
             depth_or_array_layers: 1,
         };
 
         let buffer_info = self.get_texture_buffer_info(
-            target,
-            PixelRegion::for_whole_size(target.texture.width(), target.texture.height()),
+            target_texture,
+            PixelRegion::for_whole_size(
+                target_texture.texture.width(),
+                target_texture.texture.height(),
+            ),
         );
 
         let mut texture_target = TextureTarget {
             size: extent,
-            texture: target.texture.clone(),
-            format: wgpu::TextureFormat::Rgba8Unorm,
+            texture: target_texture.texture.clone(),
+            format: target_texture.texture.format(),
             buffer: buffer_info,
         };
@@ -986,7 +1052,7 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
             shader,
             ShaderMode::ShaderJob,
             arguments,
-            &target.texture,
+            &target_texture.texture,
             &mut render_command_encoder,
             Some(wgpu::RenderPassColorAttachment {
                 view: frame_output.view(),
@@ -998,7 +1064,7 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
             }),
             1,
             // When running a standalone shader, we always process the entire image
-            &FilterSource::for_entire_texture(&target.texture),
+            &FilterSource::for_entire_texture(&target_texture.texture),
         )?;
 
         let index = self
@@ -1006,12 +1072,53 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
             .queue
             .submit(Some(render_command_encoder.finish()));
 
-        Ok(self.make_queue_sync_handle(
+        let sync_handle = self.make_queue_sync_handle(
             texture_target,
             index,
             target_handle,
             PixelRegion::for_whole_size(extent.width, extent.height),
-        ))
+        );
+
+        match target {
+            PixelBenderTarget::Bitmap(_) => Ok(PixelBenderOutput::Bitmap(sync_handle)),
+            PixelBenderTarget::Bytes { width, .. } => {
+                let mut output = None;
+                sync_handle.retrieve_offscreen_texture(Box::new(|raw_pixels, buffer_width| {
+                    if buffer_width as usize
+                        != *width as usize * output_channels * std::mem::size_of::<f32>()
+                    {
+                        let channels_in_raw_pixels = if has_padding { 4usize } else { 3usize };
+                        let mut new_pixels = Vec::new();
+                        for row in raw_pixels.chunks(buffer_width as usize) {
+                            // Ignore any wgpu-added padding (this is distinct from the alpha-channel padding
+                            // that we add for pixelbender)
+                            let actual_row = &row[0..(*width as usize
+                                * channels_in_raw_pixels
+                                * std::mem::size_of::<f32>())];
+                            for pixel in actual_row
+                                .chunks_exact(channels_in_raw_pixels * std::mem::size_of::<f32>())
+                            {
+                                if has_padding {
+                                    // Take the first three channels
+                                    new_pixels.extend_from_slice(
+                                        &pixel[0..(3 * std::mem::size_of::<f32>())],
+                                    );
+                                } else {
+                                    // Copy the pixel as-is
+                                    new_pixels.extend_from_slice(pixel);
+                                }
+                            }
+                        }
+                        output = Some(new_pixels);
+                    } else {
+                        output = Some(raw_pixels.to_vec());
+                    };
+                }))?;
+                Ok(PixelBenderOutput::Bytes(output.unwrap()))
+            }
+        }
     }
 
     fn create_empty_texture(
@@ -1113,6 +1220,7 @@ async fn request_device(
         wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
         wgpu::Features::SHADER_UNUSED_VERTEX_OUTPUT,
         wgpu::Features::TEXTURE_COMPRESSION_BC,
+        wgpu::Features::FLOAT32_FILTERABLE,
     ];
 
     for feature in try_features {

View File

@@ -222,8 +222,12 @@ impl QueueSyncHandle {
             } => {
                 let texture = as_texture(&handle);
 
-                let buffer_dimensions =
-                    BufferDimensions::new(copy_area.width() as usize, copy_area.height() as usize);
+                let buffer_dimensions = BufferDimensions::new(
+                    copy_area.width() as usize,
+                    copy_area.height() as usize,
+                    texture.texture.format(),
+                );
                 let buffer = pool.take(&descriptors, buffer_dimensions.clone());
                 let label = create_debug_label!("Render target transfer encoder");
                 let mut encoder =

View File

@@ -1,6 +1,6 @@
 use std::cell::RefCell;
+use std::collections::HashMap;
 use std::num::NonZeroU64;
-use std::sync::OnceLock;
 use std::{borrow::Cow, cell::Cell, sync::Arc};
 
 use indexmap::IndexMap;
@@ -23,7 +23,6 @@ use wgpu::{
 
 use crate::filters::{FilterSource, VERTEX_BUFFERS_DESCRIPTION_FILTERS};
 use crate::raw_texture_as_texture;
-use crate::utils::SampleCountMap;
 use crate::{
     as_texture, backend::WgpuRenderBackend, descriptors::Descriptors, target::RenderTarget, Texture,
 };
@@ -32,7 +31,7 @@ use crate::{
 pub struct PixelBenderWgpuShader {
     bind_group_layout: wgpu::BindGroupLayout,
     pipeline_layout: PipelineLayout,
-    pipelines: SampleCountMap<OnceLock<RenderPipeline>>,
+    pipelines: RefCell<HashMap<(u32, wgpu::TextureFormat), Arc<RenderPipeline>>>,
     vertex_shader: wgpu::ShaderModule,
     fragment_shader: wgpu::ShaderModule,
     shader: PixelBenderShader,
@@ -46,8 +45,17 @@
 impl PixelBenderWgpuShader {
     /// Gets a `RenderPipeline` for the specified sample count
-    fn get_pipeline(&self, descriptors: &Descriptors, samples: u32) -> &wgpu::RenderPipeline {
-        self.pipelines.get_or_init(samples, || {
-            descriptors
-                .device
-                .create_render_pipeline(&RenderPipelineDescriptor {
+    fn get_pipeline(
+        &self,
+        descriptors: &Descriptors,
+        samples: u32,
+        format: TextureFormat,
+    ) -> Arc<wgpu::RenderPipeline> {
+        self.pipelines
+            .borrow_mut()
+            .entry((samples, format))
+            .or_insert_with(|| {
+                Arc::new(
+                    descriptors
+                        .device
+                        .create_render_pipeline(&RenderPipelineDescriptor {
@@ -62,7 +70,7 @@ impl PixelBenderWgpuShader {
                             module: &self.fragment_shader,
                             entry_point: naga_pixelbender::FRAGMENT_SHADER_ENTRYPOINT,
                             targets: &[Some(ColorTargetState {
-                                format: TextureFormat::Rgba8Unorm,
+                                format,
                                 // FIXME - what should this be?
                                 blend: Some(wgpu::BlendState {
                                     color: BlendComponent::OVER,
@@ -79,8 +87,10 @@ impl PixelBenderWgpuShader {
                                 alpha_to_coverage_enabled: false,
                             },
                             multiview: Default::default(),
-                })
-        })
+                        }),
+                )
+            })
+            .clone()
     }
 }
@@ -250,10 +260,104 @@
     }
 }
 
-fn image_input_as_texture<'a>(input: &'a ImageInputTexture<'a>) -> &wgpu::Texture {
+enum BorrowedOrOwnedTexture<'a> {
+    Borrowed(&'a wgpu::Texture),
+    Owned(wgpu::Texture),
+}
+
+impl<'a> std::ops::Deref for BorrowedOrOwnedTexture<'a> {
+    type Target = wgpu::Texture;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            BorrowedOrOwnedTexture::Borrowed(t) => t,
+            BorrowedOrOwnedTexture::Owned(t) => t,
+        }
+    }
+}
+
+/// The texture format to use for the temporary texture we create when reading/writing
+/// from raw bytes (ByteArray to Vector.<Number>). We use a Float texture to be able to
+/// pass in floating-point values directly, without converting on the host side.
+/// In the special case with 3 channels, we use `Rgba32Float` since wgpu lacks a `Rgb32Float`
+/// texture. We handle this by manually inserting and removing padding to keep the pixels
+/// at the correct positions. This isn't ideal, but allows us to keep the naga code generation
+/// simple.
+pub(super) fn temporary_texture_format_for_channels(channels: u32) -> wgpu::TextureFormat {
+    match channels {
+        1 => wgpu::TextureFormat::R32Float,
+        2 => wgpu::TextureFormat::Rg32Float,
+        3 => wgpu::TextureFormat::Rgba32Float,
+        4 => wgpu::TextureFormat::Rgba32Float,
+        _ => panic!("Unsupported number of channels: {}", channels),
+    }
+}
+
+fn image_input_as_texture<'a>(
+    descriptors: &Descriptors,
+    input: &'a ImageInputTexture<'a>,
+) -> BorrowedOrOwnedTexture<'a> {
     match input {
-        ImageInputTexture::Bitmap(handle) => &as_texture(handle).texture,
-        ImageInputTexture::TextureRef(raw_texture) => raw_texture_as_texture(*raw_texture),
+        ImageInputTexture::Bitmap(handle) => {
+            BorrowedOrOwnedTexture::Borrowed(&as_texture(handle).texture)
+        }
+        ImageInputTexture::TextureRef(raw_texture) => {
+            BorrowedOrOwnedTexture::Borrowed(raw_texture_as_texture(*raw_texture))
+        }
+        ImageInputTexture::Bytes {
+            width,
+            height,
+            channels,
+            bytes,
+        } => {
+            let extent = wgpu::Extent3d {
+                width: *width,
+                height: *height,
+                depth_or_array_layers: 1,
+            };
+            let texture_format = temporary_texture_format_for_channels(*channels);
+            // We're going to be using an Rgba32Float texture, so we need to pad the bytes
+            // with zeros for the alpha channel. The PixelBender code will only ever try to
+            // use the first 3 channels (since it was compiled with a 3-channel input),
+            // so it doesn't matter what value we choose here.
+            let padded_bytes = if *channels == 3 {
+                let mut padded_bytes = Vec::with_capacity(bytes.len() * 4 / 3);
+                for chunk in bytes.chunks_exact(12) {
+                    padded_bytes.extend_from_slice(chunk);
+                    padded_bytes.extend_from_slice(&[0, 0, 0, 0]);
+                }
+                Cow::Owned(padded_bytes)
+            } else {
+                Cow::Borrowed(bytes)
+            };
+            let fresh_texture = descriptors.device.create_texture(&TextureDescriptor {
+                label: Some("Temporary PixelBender output texture"),
+                size: extent,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: texture_format,
+                usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::TEXTURE_BINDING,
+                view_formats: &[texture_format],
+            });
+            descriptors.queue.write_texture(
+                wgpu::ImageCopyTexture {
+                    texture: &fresh_texture,
+                    mip_level: 0,
+                    origin: wgpu::Origin3d::ZERO,
+                    aspect: wgpu::TextureAspect::All,
+                },
+                &padded_bytes,
+                wgpu::ImageDataLayout {
+                    offset: 0,
+                    bytes_per_row: Some(padded_bytes.len() as u32 / height),
+                    rows_per_image: None,
+                },
+                extent,
+            );
+            BorrowedOrOwnedTexture::Owned(fresh_texture)
+        }
     }
 }
@@ -362,8 +466,15 @@ pub(super) fn run_pixelbender_shader_impl(
     for input in &mut arguments {
         match input {
             PixelBenderShaderArgument::ImageInput { index, texture, .. } => {
-                let input_texture = &image_input_as_texture(texture.as_ref().unwrap());
-                if std::ptr::eq(*input_texture, target) {
+                let input_texture = &image_input_as_texture(descriptors, texture.as_ref().unwrap());
+                let same_source_dest =
+                    if let BorrowedOrOwnedTexture::Borrowed(input_texture) = input_texture {
+                        std::ptr::eq(*input_texture, target)
+                    } else {
+                        // When we create a fresh texture, it can never be equal to the pre-existing target
+                        false
+                    };
+                if same_source_dest {
                     // The input is the same as the output - we need to clone the input.
                     // We will write to the original output, and use a clone of the input as a texture input binding
                     let cached_fresh_handle = target_clone.get_or_insert_with(|| {
@@ -408,7 +519,7 @@ pub(super) fn run_pixelbender_shader_impl(
                     });
                     *texture = Some(cached_fresh_handle.clone().into());
                 }
-                let wgpu_texture = image_input_as_texture(texture.as_ref().unwrap());
+                let wgpu_texture = image_input_as_texture(descriptors, texture.as_ref().unwrap());
                 texture_views.insert(
                     *index,
                     wgpu_texture.create_view(&wgpu::TextureViewDescriptor::default()),
@@ -520,7 +631,7 @@ pub(super) fn run_pixelbender_shader_impl(
     for input in &arguments {
         match input {
             PixelBenderShaderArgument::ImageInput { index, texture, .. } => {
-                let wgpu_texture = image_input_as_texture(texture.as_ref().unwrap());
+                let wgpu_texture = image_input_as_texture(descriptors, texture.as_ref().unwrap());
 
                 if first_image.is_none() {
                     first_image = Some(wgpu_texture);
@@ -548,7 +659,7 @@ pub(super) fn run_pixelbender_shader_impl(
     let vertices = source.vertices(&descriptors.device);
 
-    let pipeline = compiled_shader.get_pipeline(descriptors, sample_count);
+    let pipeline = compiled_shader.get_pipeline(descriptors, sample_count, target.format());
 
     let mut render_pass = render_command_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
         label: Some("PixelBender render pass"),
@@ -557,7 +668,7 @@ pub(super) fn run_pixelbender_shader_impl(
         ..Default::default()
     });
 
     render_pass.set_bind_group(0, &bind_group, &[]);
-    render_pass.set_pipeline(pipeline);
+    render_pass.set_pipeline(&pipeline);
     render_pass.set_vertex_buffer(0, vertices.slice(..));
     render_pass.set_index_buffer(

View File

@@ -199,14 +199,14 @@ impl TextureTarget {
             )
             .into());
         }
-        let buffer_dimensions = BufferDimensions::new(size.0 as usize, size.1 as usize);
+        let format = wgpu::TextureFormat::Rgba8Unorm;
+        let buffer_dimensions = BufferDimensions::new(size.0 as usize, size.1 as usize, format);
         let size = wgpu::Extent3d {
             width: size.0,
             height: size.1,
            depth_or_array_layers: 1,
         };
         let texture_label = create_debug_label!("Render target texture");
-        let format = wgpu::TextureFormat::Rgba8Unorm;
         let texture = device.create_texture(&wgpu::TextureDescriptor {
             label: texture_label.as_deref(),
             size,

View File

@@ -3,9 +3,8 @@ use crate::descriptors::Descriptors;
 use crate::globals::Globals;
 use crate::Transforms;
 use std::borrow::Cow;
-use std::mem::size_of;
 use wgpu::util::DeviceExt;
-use wgpu::CommandEncoder;
+use wgpu::{CommandEncoder, TextureFormat};
 
 macro_rules! create_debug_label {
     ($($arg:tt)*) => (
@@ -99,8 +98,8 @@ pub struct BufferDimensions {
 impl BufferDimensions {
     #[allow(dead_code)]
-    pub fn new(width: usize, height: usize) -> Self {
-        let bytes_per_pixel = size_of::<u32>();
+    pub fn new(width: usize, height: usize, format: TextureFormat) -> Self {
+        let bytes_per_pixel = format.block_copy_size(None).unwrap() as usize;
         let unpadded_bytes_per_row = width * bytes_per_pixel;
         let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
         let padded_bytes_per_row_padding = (align - unpadded_bytes_per_row % align) % align;

View File

@@ -0,0 +1,47 @@
[
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "Wait"
},
{
"type": "MouseMove",
"pos": [
450.0,
450.0
]
},
{
"type": "MouseDown",
"pos": [
450.0,
450.0
],
"btn": "Left"
}
]

Binary file not shown (new image, 539 KiB).

View File

@@ -0,0 +1,8 @@
num_ticks = 20
[image_comparisons.output]
tolerance = 5
max_outliers = 389
[player_options]
with_renderer = { optional = false, sample_count = 1 }