desktop: Render game separately to UI, blit both onto surface

Nathan Adams 2023-05-21 01:50:27 +02:00
parent 5bf43f9025
commit b9322ba93a
10 changed files with 346 additions and 90 deletions
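
In short, this change splits rendering in two: the player now draws the movie into an offscreen texture target (with TEXTURE_BINDING usage), while the GuiController owns the window surface and, each frame, blits that texture onto the surface with a small WGSL shader, renders the egui UI on top, and presents. A rough sketch of the resulting per-frame path, condensed from the main-loop hunk further down (the redraw helper itself is hypothetical; the types and calls are the ones appearing in this diff):

// Hypothetical helper summarising the new redraw path.
fn redraw(player: &mut ruffle_core::Player, gui: &mut GuiController) {
    use ruffle_render_wgpu::backend::WgpuRenderBackend;
    use ruffle_render_wgpu::target::TextureTarget;

    // 1. Render the movie into the offscreen texture target.
    player.render();

    // 2. Borrow the texture the movie was just rendered into.
    let renderer = player
        .renderer_mut()
        .downcast_mut::<WgpuRenderBackend<TextureTarget>>()
        .expect("Renderer must be correct type");

    // 3. Blit that texture onto the surface, draw the egui UI over it, and present.
    gui.render(&renderer.target().texture);
}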

Cargo.lock (generated; 1 line changed)

@@ -3726,6 +3726,7 @@ dependencies = [
"egui-wgpu",
"egui-winit",
"embed-resource",
"futures",
"generational-arena",
"isahc",
"os_info",


@@ -33,6 +33,7 @@ os_info = { version = "3", default-features = false }
unic-langid = "0.9.1"
sys-locale = "0.3.0"
wgpu = { version = "0.16.0" }
futures = "0.3.28"
# Deliberately held back to match tracy client used by profiling crate
tracing-tracy = { version = "=0.10.0", optional = true }

desktop/src/blit.wgsl (new file, 47 lines)

@@ -0,0 +1,47 @@
// Vertex shader bindings
struct VertexOutput {
@location(0) tex_coord: vec2<f32>,
@builtin(position) position: vec4<f32>,
};
/// Converts a color from sRGB to linear color space.
fn srgb_to_linear(srgb: vec4<f32>) -> vec4<f32> {
var rgb: vec3<f32> = srgb.rgb;
if( srgb.a > 0.0 ) {
rgb = rgb / srgb.a;
}
let a = rgb / 12.92;
let b = pow((rgb + vec3<f32>(0.055)) / 1.055, vec3<f32>(2.4));
let c = step(vec3<f32>(0.04045), rgb);
return vec4<f32>(mix(a, b, c) * srgb.a, srgb.a);
}
@vertex
fn vs_main(
@location(0) a_pos: vec2<f32>,
@location(1) a_tex_coord: vec2<f32>,
) -> VertexOutput {
var out: VertexOutput;
out.tex_coord = a_tex_coord;
out.position = vec4<f32>(a_pos, 0.0, 1.0);
return out;
}
// Fragment shader bindings
@group(0) @binding(0) var r_tex_color: texture_2d<f32>;
@group(0) @binding(1) var r_tex_sampler: sampler;
@fragment
fn fs_main_linear_framebuffer(in: VertexOutput) -> @location(0) vec4<f32> {
// We always have a linear texture at the moment.
return textureSample(r_tex_color, r_tex_sampler, in.tex_coord);
}
@fragment
fn fs_main_srgb_framebuffer(in: VertexOutput) -> @location(0) vec4<f32> {
// We always have a linear texture at the moment.
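// (Note: the sampled values are effectively sRGB-encoded even though the texture
// format is non-sRGB; an sRGB surface re-encodes on write, so decoding to linear
// here keeps the presented colors unchanged.)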
let tex = textureSample(r_tex_color, r_tex_sampler, in.tex_coord);
return srgb_to_linear(tex);
}


@@ -1,12 +1,21 @@
use crate::custom_event::RuffleEvent;
use anyhow::{anyhow, Result};
use egui::*;
use ruffle_render_wgpu::backend::request_adapter_and_device;
use ruffle_render_wgpu::descriptors::Descriptors;
use ruffle_render_wgpu::utils::{format_list, get_backend_names};
use std::borrow::Cow;
use std::path::Path;
use std::rc::Rc;
use std::sync::Arc;
use std::time::{Duration, Instant};
use wgpu::util::DeviceExt;
use winit::event_loop::{EventLoop, EventLoopProxy};
use winit::window::Window;
/// Integration layer connecting wgpu+winit to egui.
pub struct GuiController {
descriptors: Arc<Descriptors>,
egui_ctx: egui::Context,
egui_winit: egui_winit::State,
egui_renderer: egui_wgpu::renderer::Renderer,
@@ -14,25 +23,166 @@ pub struct GuiController {
window: Rc<Window>,
last_update: Instant,
repaint_after: Duration,
surface: wgpu::Surface,
surface_format: wgpu::TextureFormat,
blit_bind_group_layout: wgpu::BindGroupLayout,
blit_pipeline: wgpu::RenderPipeline,
blit_sampler: wgpu::Sampler,
blit_vertices: wgpu::Buffer,
}
// x y u v
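// Two triangles forming a full-screen quad; UV (0, 0) is the top-left corner.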
const BLIT_VERTICES: [[f32; 4]; 6] = [
[-1.0, 1.0, 0.0, 0.0], // tl
[1.0, 1.0, 1.0, 0.0], // tr
[1.0, -1.0, 1.0, 1.0], // br
[1.0, -1.0, 1.0, 1.0], // br
[-1.0, -1.0, 0.0, 1.0], // bl
[-1.0, 1.0, 0.0, 0.0], // tl
];
impl GuiController {
pub fn new<T: ruffle_render_wgpu::target::RenderTarget>(
renderer: &ruffle_render_wgpu::backend::WgpuRenderBackend<T>,
pub fn new(
window: Rc<Window>,
event_loop: &EventLoop<RuffleEvent>,
) -> Self {
trace_path: Option<&Path>,
backend: wgpu::Backends,
power_preference: wgpu::PowerPreference,
) -> Result<Self> {
if wgpu::Backends::SECONDARY.contains(backend) {
tracing::warn!(
"{} graphics backend support may not be fully supported.",
format_list(&get_backend_names(backend), "and")
);
}
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: backend,
dx12_shader_compiler: wgpu::Dx12Compiler::default(),
});
let surface = unsafe { instance.create_surface(window.as_ref()) }?;
let (adapter, device, queue) = futures::executor::block_on(request_adapter_and_device(
backend,
instance,
Some(&surface),
power_preference,
trace_path,
))
.map_err(|e| anyhow!(e.to_string()))?;
let descriptors = Descriptors::new(adapter, device, queue);
let egui_ctx = Context::default();
let mut egui_winit = egui_winit::State::new(event_loop);
egui_winit.set_pixels_per_point(window.scale_factor() as f32);
egui_winit
.set_max_texture_side(renderer.descriptors().limits.max_texture_dimension_2d as usize);
egui_winit.set_max_texture_side(descriptors.limits.max_texture_dimension_2d as usize);
let surface_format = surface
.get_capabilities(&descriptors.adapter)
.formats
.first()
.cloned()
.expect("At least one format should be supported");
let target_format = renderer.target().format();
let egui_renderer = egui_wgpu::Renderer::new(renderer.device(), target_format, None, 1);
let module = descriptors
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("blit.wgsl"))),
});
let bind_group_layout =
descriptors
.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let sampler = descriptors.device.create_sampler(&wgpu::SamplerDescriptor {
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
..Default::default()
});
let pipeline_layout =
descriptors
.device
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let pipeline = descriptors
.device
.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
entry_point: "vs_main",
module: &module,
buffers: &[wgpu::VertexBufferLayout {
array_stride: 4 * 4,
step_mode: wgpu::VertexStepMode::Vertex,
// 0: vec2 position
// 1: vec2 texture coordinates
attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2],
}],
},
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
unclipped_depth: false,
conservative: false,
cull_mode: None,
front_face: wgpu::FrontFace::default(),
polygon_mode: wgpu::PolygonMode::default(),
strip_index_format: None,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
alpha_to_coverage_enabled: false,
count: 1,
mask: !0,
},
fragment: Some(wgpu::FragmentState {
module: &module,
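// Pick the fragment entry point to match the surface: sRGB surfaces re-encode
// on write, so they get the extra decode in blit.wgsl.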
entry_point: if surface_format.is_srgb() {
"fs_main_srgb_framebuffer"
} else {
"fs_main_linear_framebuffer"
},
targets: &[Some(wgpu::ColorTargetState {
format: surface_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
});
let vertices = descriptors
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&BLIT_VERTICES),
usage: wgpu::BufferUsages::VERTEX,
});
let egui_renderer = egui_wgpu::Renderer::new(&descriptors.device, surface_format, None, 1);
let event_loop = event_loop.create_proxy();
let gui = RuffleGui::new(event_loop);
Self {
Ok(Self {
descriptors: Arc::new(descriptors),
egui_ctx,
egui_winit,
egui_renderer,
@@ -40,11 +190,35 @@ impl GuiController {
window,
last_update: Instant::now(),
repaint_after: Duration::ZERO,
surface,
surface_format,
blit_bind_group_layout: bind_group_layout,
blit_pipeline: pipeline,
blit_sampler: sampler,
blit_vertices: vertices,
})
}
pub fn descriptors(&self) -> &Arc<Descriptors> {
&self.descriptors
}
#[must_use]
pub fn handle_event(&mut self, event: &winit::event::WindowEvent) -> bool {
if let winit::event::WindowEvent::Resized(size) = &event {
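// Reconfigure the surface on resize so the next get_current_texture() matches
// the new window size.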
self.surface.configure(
&self.descriptors.device,
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: self.surface_format,
width: size.width,
height: size.height,
present_mode: Default::default(),
alpha_mode: Default::default(),
view_formats: Default::default(),
},
);
}
let response = self.egui_winit.on_event(&self.egui_ctx, event);
if response.repaint {
self.window.request_redraw();
@@ -52,10 +226,13 @@ impl GuiController {
response.consumed
}
pub fn render(
&mut self,
render_ctx: ruffle_render_wgpu::backend::RenderCallbackParams,
) -> Vec<wgpu::CommandBuffer> {
pub fn render(&mut self, movie: &wgpu::Texture) {
let surface_texture = self
.surface
.get_current_texture()
.expect("Surface became unavailable");
let movie_view = &movie.create_view(&Default::default());
let raw_input = self.egui_winit.take_egui_input(&self.window);
let full_output = self.egui_ctx.run(raw_input, |context| {
self.gui.update(context);
@@ -77,7 +254,7 @@ impl GuiController {
};
let mut encoder =
render_ctx
self.descriptors
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("egui encoder"),
@@ -85,28 +262,46 @@ impl GuiController {
for (id, image_delta) in &full_output.textures_delta.set {
self.egui_renderer.update_texture(
render_ctx.device,
render_ctx.queue,
&self.descriptors.device,
&self.descriptors.queue,
*id,
image_delta,
);
}
let mut command_buffers = self.egui_renderer.update_buffers(
render_ctx.device,
render_ctx.queue,
&self.descriptors.device,
&self.descriptors.queue,
&mut encoder,
&clipped_primitives,
&screen_descriptor,
);
{
let surface_view = surface_texture.texture.create_view(&Default::default());
let blit_bind_group =
self.descriptors
.device
.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &self.blit_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(movie_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.blit_sampler),
},
],
});
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: render_ctx.texture_view,
view: &surface_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
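// (Changed from Load to Clear: this pass now draws the whole frame, movie blit
// plus egui, instead of overlaying egui on an already-rendered surface.)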
store: true,
},
})],
@@ -114,6 +309,11 @@ impl GuiController {
label: Some("egui_render"),
});
render_pass.set_pipeline(&self.blit_pipeline);
render_pass.set_bind_group(0, &blit_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.blit_vertices.slice(..));
render_pass.draw(0..6, 0..1);
self.egui_renderer
.render(&mut render_pass, &clipped_primitives, &screen_descriptor);
}
@@ -123,7 +323,8 @@ impl GuiController {
}
command_buffers.push(encoder.finish());
command_buffers
self.descriptors.queue.submit(command_buffers);
surface_texture.present();
}
pub fn set_ui_visible(&mut self, value: bool) {


@@ -33,6 +33,7 @@ use ruffle_render::backend::RenderBackend;
use ruffle_render::quality::StageQuality;
use ruffle_render_wgpu::backend::WgpuRenderBackend;
use ruffle_render_wgpu::clap::{GraphicsBackend, PowerPreference};
use ruffle_render_wgpu::target::TextureTarget;
use std::cell::RefCell;
use std::io::Read;
use std::panic::PanicInfo;
@@ -338,18 +339,6 @@ impl App {
opt.open_url_mode,
);
let viewport_size = window.inner_size();
let mut renderer = WgpuRenderBackend::for_window(
&window,
(viewport_size.width, viewport_size.height),
opt.graphics.into(),
opt.power.into(),
trace_path(&opt),
)
.map_err(|e| anyhow!(e.to_string()))
.context("Couldn't create wgpu rendering backend")?;
RENDER_INFO.with(|i| *i.borrow_mut() = Some(renderer.debug_info().to_string()));
let window = Rc::new(window);
if cfg!(feature = "software_video") {
@@ -357,18 +346,26 @@ impl App {
builder.with_video(ruffle_video_software::backend::SoftwareVideoBackend::new());
}
let gui = Arc::new(Mutex::new(GuiController::new(
&renderer,
let gui = GuiController::new(
window.clone(),
&event_loop,
)));
{
let gui = gui.clone();
renderer.set_render_callback(Some(Box::new(move |render_ctx| {
let mut gui = gui.lock().expect("Gui lock");
gui.render(render_ctx)
})));
}
trace_path(&opt),
opt.graphics.into(),
opt.power.into(),
)?;
let viewport_size = window.inner_size();
let renderer = WgpuRenderBackend::new(
gui.descriptors().clone(),
TextureTarget::new(
&gui.descriptors().device,
(viewport_size.width, viewport_size.height),
)
.map_err(|e| anyhow!(e.to_string()))?,
)
.map_err(|e| anyhow!(e.to_string()))
.context("Couldn't create wgpu rendering backend")?;
RENDER_INFO.with(|i| *i.borrow_mut() = Some(renderer.debug_info().to_string()));
builder = builder
.with_navigator(navigator)
@@ -399,7 +396,7 @@ impl App {
event_loop_proxy: event_loop.create_proxy(),
event_loop: Some(event_loop),
executor,
gui,
gui: Arc::new(Mutex::new(gui)),
player,
min_window_size,
max_window_size,
@@ -491,7 +488,16 @@ impl App {
winit::event::Event::RedrawRequested(_) => {
// Don't render when minimized to avoid potential swap chain errors in `wgpu`.
if !minimized {
self.player.lock().expect("Cannot reenter").render();
let mut player = self.player.lock().expect("Cannot reenter");
player.render();
let renderer = player
.renderer_mut()
.downcast_mut::<WgpuRenderBackend<TextureTarget>>()
.expect("Renderer must be correct type");
self.gui
.lock()
.expect("Gui lock")
.render(&renderer.target().texture);
#[cfg(feature = "tracy")]
tracing_tracy::client::Client::running()
.expect("tracy client must be running")


@@ -6,7 +6,7 @@ use rayon::prelude::*;
use ruffle_core::limits::ExecutionLimit;
use ruffle_core::tag_utils::SwfMovie;
use ruffle_core::PlayerBuilder;
use ruffle_render_wgpu::backend::WgpuRenderBackend;
use ruffle_render_wgpu::backend::{request_adapter_and_device, WgpuRenderBackend};
use ruffle_render_wgpu::clap::{GraphicsBackend, PowerPreference};
use ruffle_render_wgpu::descriptors::Descriptors;
use ruffle_render_wgpu::target::TextureTarget;
@@ -405,8 +405,7 @@ fn main() -> Result<()> {
backends: opt.graphics.into(),
dx12_shader_compiler: wgpu::Dx12Compiler::default(),
});
let (adapter, device, queue) =
futures::executor::block_on(WgpuRenderBackend::<TextureTarget>::request_device(
let (adapter, device, queue) = futures::executor::block_on(request_adapter_and_device(
opt.graphics.into(),
instance,
None,


@@ -60,7 +60,7 @@ impl WgpuRenderBackend<SwapChainTarget> {
dx12_shader_compiler: wgpu::Dx12Compiler::default(),
});
let surface = instance.create_surface_from_canvas(canvas)?;
let (adapter, device, queue) = Self::request_device(
let (adapter, device, queue) = request_adapter_and_device(
wgpu::Backends::BROWSER_WEBGPU | wgpu::Backends::GL,
instance,
Some(&surface),
@@ -95,7 +95,7 @@ impl WgpuRenderBackend<SwapChainTarget> {
dx12_shader_compiler: wgpu::Dx12Compiler::default(),
});
let surface = unsafe { instance.create_surface(window) }?;
let (adapter, device, queue) = futures::executor::block_on(Self::request_device(
let (adapter, device, queue) = futures::executor::block_on(request_adapter_and_device(
backend,
instance,
Some(&surface),
@@ -126,7 +126,7 @@ impl WgpuRenderBackend<crate::target::TextureTarget> {
backends: backend,
dx12_shader_compiler: wgpu::Dx12Compiler::default(),
});
let (adapter, device, queue) = futures::executor::block_on(Self::request_device(
let (adapter, device, queue) = futures::executor::block_on(request_adapter_and_device(
backend,
instance,
None,
@@ -210,33 +210,6 @@ impl<T: RenderTarget> WgpuRenderBackend<T> {
})
}
pub async fn request_device(
backend: wgpu::Backends,
instance: wgpu::Instance,
surface: Option<&wgpu::Surface>,
power_preference: wgpu::PowerPreference,
trace_path: Option<&Path>,
) -> Result<(wgpu::Adapter, wgpu::Device, wgpu::Queue), Error> {
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference,
compatible_surface: surface,
force_fallback_adapter: false,
}).await
.ok_or_else(|| {
let names = get_backend_names(backend);
if names.is_empty() {
"Ruffle requires hardware acceleration, but no compatible graphics device was found (no backend provided?)".to_string()
} else if cfg!(any(windows, target_os = "macos")) {
format!("Ruffle does not support OpenGL on {}.", if cfg!(windows) { "Windows" } else { "macOS" })
} else {
format!("Ruffle requires hardware acceleration, but no compatible graphics device was found supporting {}", format_list(&names, "or"))
}
})?;
let (device, queue) = request_device(&adapter, trace_path).await?;
Ok((adapter, device, queue))
}
fn register_shape_internal(
&mut self,
shape: DistilledShape,
@@ -800,6 +773,33 @@ impl<T: RenderTarget + 'static> RenderBackend for WgpuRenderBackend<T> {
}
}
pub async fn request_adapter_and_device(
backend: wgpu::Backends,
instance: wgpu::Instance,
surface: Option<&wgpu::Surface>,
power_preference: wgpu::PowerPreference,
trace_path: Option<&Path>,
) -> Result<(wgpu::Adapter, wgpu::Device, wgpu::Queue), Error> {
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference,
compatible_surface: surface,
force_fallback_adapter: false,
}).await
.ok_or_else(|| {
let names = get_backend_names(backend);
if names.is_empty() {
"Ruffle requires hardware acceleration, but no compatible graphics device was found (no backend provided?)".to_string()
} else if cfg!(any(windows, target_os = "macos")) {
format!("Ruffle does not support OpenGL on {}.", if cfg!(windows) { "Windows" } else { "macOS" })
} else {
format!("Ruffle requires hardware acceleration, but no compatible graphics device was found supporting {}", format_list(&names, "or"))
}
})?;
let (device, queue) = request_device(&adapter, trace_path).await?;
Ok((adapter, device, queue))
}
// We try to request the highest limits we can get away with
async fn request_device(
adapter: &wgpu::Adapter,


@@ -26,7 +26,7 @@ pub use wgpu;
type Error = Box<dyn std::error::Error>;
#[macro_use]
mod utils;
pub mod utils;
mod bitmaps;
mod context3d;


@@ -214,7 +214,9 @@ impl TextureTarget {
dimension: wgpu::TextureDimension::D2,
format,
view_formats: &[format],
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
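// TEXTURE_BINDING added so this texture can be sampled, e.g. by the desktop GUI's blit pass.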
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::TEXTURE_BINDING,
});
let buffer_label = create_debug_label!("Render target buffer");
let buffer = device.create_buffer(&wgpu::BufferDescriptor {


@@ -1,7 +1,6 @@
use once_cell::sync::Lazy;
use ruffle_render_wgpu::backend::WgpuRenderBackend;
use ruffle_render_wgpu::backend::request_adapter_and_device;
use ruffle_render_wgpu::descriptors::Descriptors;
use ruffle_render_wgpu::target::TextureTarget;
use ruffle_render_wgpu::wgpu;
use std::sync::Arc;
@@ -18,7 +17,7 @@
*/
fn create_wgpu_device() -> Option<(wgpu::Adapter, wgpu::Device, wgpu::Queue)> {
futures::executor::block_on(WgpuRenderBackend::<TextureTarget>::request_device(
futures::executor::block_on(request_adapter_and_device(
wgpu::Backends::all(),
wgpu::Instance::new(Default::default()),
None,