2020-08-27 10:32:41 +00:00
|
|
|
use crate::utils::BufferDimensions;
|
2022-08-08 12:54:34 +00:00
|
|
|
use crate::Error;
|
2022-07-26 03:07:48 +00:00
|
|
|
use ruffle_render::utils::unmultiply_alpha_rgba;
|
2020-05-04 20:59:06 +00:00
|
|
|
use std::fmt::Debug;
|
2022-09-30 21:25:54 +00:00
|
|
|
use std::sync::Arc;
|
2020-05-04 20:59:06 +00:00
|
|
|
|
|
|
|
/// A single frame acquired from a [`RenderTarget`], exposing the
/// `wgpu::TextureView` that draw commands are recorded against.
pub trait RenderTargetFrame: Debug {
    /// Consumes the frame and returns the owned texture view.
    fn into_view(self) -> wgpu::TextureView;

    /// Borrows the frame's texture view.
    fn view(&self) -> &wgpu::TextureView;
}
|
|
|
|
|
2020-05-11 07:02:49 +00:00
|
|
|
/// Abstraction over the destination of rendered output: a window swap
/// chain ([`SwapChainTarget`]) or an offscreen texture ([`TextureTarget`]).
pub trait RenderTarget: Debug + 'static {
    /// The per-frame handle type produced by [`Self::get_next_texture`].
    type Frame: RenderTargetFrame;

    /// Resizes the underlying surface or texture to `width` x `height` pixels.
    fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32);

    /// The texture format frames are rendered in.
    fn format(&self) -> wgpu::TextureFormat;

    /// Current width in pixels.
    fn width(&self) -> u32;

    /// Current height in pixels.
    fn height(&self) -> u32;

    /// Acquires the next frame to render into.
    fn get_next_texture(&mut self) -> Result<Self::Frame, wgpu::SurfaceError>;

    /// Submits `command_buffers` to `queue` and finalizes `frame`
    /// (e.g. presenting it, or copying it out for readback).
    fn submit<I: IntoIterator<Item = wgpu::CommandBuffer>>(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        command_buffers: I,
        frame: Self::Frame,
    );
}
|
|
|
|
|
|
|
|
/// A render target backed by a window surface (swap chain).
#[derive(Debug)]
pub struct SwapChainTarget {
    // The surface presented to the window.
    window_surface: wgpu::Surface,
    // Current configuration (format, size, present/alpha modes) applied
    // to `window_surface`; mutated on resize.
    surface_config: wgpu::SurfaceConfiguration,
}
|
|
|
|
|
|
|
|
/// A frame acquired from a [`SwapChainTarget`]'s window surface.
#[derive(Debug)]
pub struct SwapChainTargetFrame {
    // The acquired surface texture; presented during `submit`.
    texture: wgpu::SurfaceTexture,
    // A view over `texture` used as the render attachment.
    view: wgpu::TextureView,
}
|
2020-05-04 20:59:06 +00:00
|
|
|
|
|
|
|
impl RenderTargetFrame for SwapChainTargetFrame {
|
avm2: Partially implement Stage3D for wgpu backend
This PR implements core 'stage3D' APIs. We are now able
to render at least two demos from the Context3D docs - a simple
triangle render, and a rotating cube.
Implemented in this PR:
* Stage3D access and Context3D creation
* IndexBuffer3D and VertexBuffer3D creation, uploading, and usage
* Program3D uploading and usage (via `naga-agal`)
* Context3D: configureBackBuffer, clear, drawTriangles, and present
Not yet implemented:
* Any 'dispose()' methods
* Depth and stencil buffers
* Context3D texture apis
* Scissor rectangle
General implementation strategy:
A new `Object` variant is added for each of the Stage3D objects
(VertexBuffer3D, Program3D, etc). This stores a handle to the
parent `Context3D`, and (depending on the object) a handle
to the underlying native resource, via `Rc<dyn
SomeRenderBackendTrait>`).
Calling methods on Context3D does not usually result in an immediate
call to a `wgpu` method. Instead, we queue up commands in our
`Context3D` instance, and execute them all on a call to `present`.
This avoids some nasty wgpu lifetime issues, and is very similar
to the approah we use for normal rendering.
The actual rendering happens on a `Texture`, with dimensions
determined by `createBackBuffer`. During 'Stage' rendering,
we render all of these Stage3D textures *behind* the normal
stage (but in front of the overall stage background color).
2022-09-18 20:50:21 +00:00
|
|
|
fn into_view(self) -> wgpu::TextureView {
|
|
|
|
self.view
|
|
|
|
}
|
|
|
|
|
2020-05-04 20:59:06 +00:00
|
|
|
fn view(&self) -> &wgpu::TextureView {
|
2021-09-08 07:20:11 +00:00
|
|
|
&self.view
|
2020-05-04 20:59:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl SwapChainTarget {
|
2021-09-08 01:55:26 +00:00
|
|
|
pub fn new(
|
|
|
|
surface: wgpu::Surface,
|
2022-10-07 10:17:41 +00:00
|
|
|
adapter: &wgpu::Adapter,
|
|
|
|
(width, height): (u32, u32),
|
2021-09-08 01:55:26 +00:00
|
|
|
device: &wgpu::Device,
|
|
|
|
) -> Self {
|
2022-10-07 10:17:41 +00:00
|
|
|
// Ideally we want to use an RGBA non-sRGB surface format, because Flash colors and
|
|
|
|
// blending are done in sRGB space -- we don't want the GPU to adjust the colors.
|
|
|
|
// Some platforms may only support an sRGB surface, in which case we will draw to an
|
|
|
|
// intermediate linear buffer and then copy to the sRGB surface.
|
|
|
|
let formats = surface.get_supported_formats(adapter);
|
|
|
|
let format = formats
|
|
|
|
.iter()
|
|
|
|
.find(|format| {
|
|
|
|
matches!(
|
|
|
|
format,
|
|
|
|
wgpu::TextureFormat::Rgba8Unorm | wgpu::TextureFormat::Bgra8Unorm
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.or_else(|| formats.first())
|
|
|
|
.copied()
|
|
|
|
// No surface (rendering to texture), default to linear RBGA.
|
|
|
|
.unwrap_or(wgpu::TextureFormat::Rgba8Unorm);
|
|
|
|
|
2021-09-08 07:20:11 +00:00
|
|
|
let surface_config = wgpu::SurfaceConfiguration {
|
|
|
|
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
2021-09-08 01:55:26 +00:00
|
|
|
format,
|
2022-10-07 10:17:41 +00:00
|
|
|
width,
|
|
|
|
height,
|
2022-07-02 12:18:30 +00:00
|
|
|
present_mode: wgpu::PresentMode::Fifo,
|
2022-10-07 10:17:41 +00:00
|
|
|
alpha_mode: surface.get_supported_alpha_modes(adapter)[0],
|
2020-05-04 20:59:06 +00:00
|
|
|
};
|
2021-09-08 07:20:11 +00:00
|
|
|
surface.configure(device, &surface_config);
|
2020-05-04 20:59:06 +00:00
|
|
|
Self {
|
2021-09-08 07:20:11 +00:00
|
|
|
surface_config,
|
2020-05-14 19:44:31 +00:00
|
|
|
window_surface: surface,
|
2020-05-04 20:59:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl RenderTarget for SwapChainTarget {
    type Frame = SwapChainTargetFrame;

    /// Reconfigures the window surface to the new pixel dimensions.
    fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
        self.surface_config.width = width;
        self.surface_config.height = height;
        self.window_surface.configure(device, &self.surface_config);
    }

    fn format(&self) -> wgpu::TextureFormat {
        self.surface_config.format
    }

    fn width(&self) -> u32 {
        self.surface_config.width
    }

    fn height(&self) -> u32 {
        self.surface_config.height
    }

    /// Acquires the next swap-chain texture and wraps it together with a
    /// default view over it.
    fn get_next_texture(&mut self) -> Result<Self::Frame, wgpu::SurfaceError> {
        let texture = self.window_surface.get_current_texture()?;
        let view = texture.texture.create_view(&Default::default());
        Ok(SwapChainTargetFrame { texture, view })
    }

    /// Submits the command buffers, then presents the surface texture.
    fn submit<I: IntoIterator<Item = wgpu::CommandBuffer>>(
        &self,
        _device: &wgpu::Device,
        queue: &wgpu::Queue,
        command_buffers: I,
        frame: Self::Frame,
    ) {
        // Work must be submitted before the frame is presented.
        queue.submit(command_buffers);
        frame.texture.present();
    }
}
|
2020-05-04 21:33:45 +00:00
|
|
|
|
|
|
|
/// An offscreen render target backed by a texture, with a mappable buffer
/// so the rendered output can be read back on the CPU.
#[derive(Debug)]
pub struct TextureTarget {
    /// Texture dimensions (depth is always 1).
    pub size: wgpu::Extent3d,
    /// The texture that is rendered into.
    pub texture: Arc<wgpu::Texture>,
    /// Format of `texture`.
    pub format: wgpu::TextureFormat,
    /// Mappable buffer the texture is copied into on `submit`.
    pub buffer: Arc<wgpu::Buffer>,
    /// Row-padding bookkeeping for `buffer` (padded vs. unpadded row sizes).
    pub buffer_dimensions: BufferDimensions,
}
|
|
|
|
|
|
|
|
/// A frame for [`TextureTarget`]; a thin wrapper around the texture's view.
#[derive(Debug)]
pub struct TextureTargetFrame(wgpu::TextureView);
|
|
|
|
|
|
|
|
impl RenderTargetFrame for TextureTargetFrame {
|
|
|
|
fn view(&self) -> &wgpu::TextureView {
|
|
|
|
&self.0
|
|
|
|
}
|
avm2: Partially implement Stage3D for wgpu backend
This PR implements core 'stage3D' APIs. We are now able
to render at least two demos from the Context3D docs - a simple
triangle render, and a rotating cube.
Implemented in this PR:
* Stage3D access and Context3D creation
* IndexBuffer3D and VertexBuffer3D creation, uploading, and usage
* Program3D uploading and usage (via `naga-agal`)
* Context3D: configureBackBuffer, clear, drawTriangles, and present
Not yet implemented:
* Any 'dispose()' methods
* Depth and stencil buffers
* Context3D texture apis
* Scissor rectangle
General implementation strategy:
A new `Object` variant is added for each of the Stage3D objects
(VertexBuffer3D, Program3D, etc). This stores a handle to the
parent `Context3D`, and (depending on the object) a handle
to the underlying native resource, via `Rc<dyn
SomeRenderBackendTrait>`).
Calling methods on Context3D does not usually result in an immediate
call to a `wgpu` method. Instead, we queue up commands in our
`Context3D` instance, and execute them all on a call to `present`.
This avoids some nasty wgpu lifetime issues, and is very similar
to the approah we use for normal rendering.
The actual rendering happens on a `Texture`, with dimensions
determined by `createBackBuffer`. During 'Stage' rendering,
we render all of these Stage3D textures *behind* the normal
stage (but in front of the overall stage background color).
2022-09-18 20:50:21 +00:00
|
|
|
|
|
|
|
fn into_view(self) -> wgpu::TextureView {
|
|
|
|
self.0
|
|
|
|
}
|
2020-05-04 21:33:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
impl TextureTarget {
    /// Creates an offscreen target of the given `(width, height)`, allocating
    /// both the render texture and a mappable readback buffer.
    ///
    /// Returns an error if either dimension is zero or exceeds the device's
    /// `max_texture_dimension_2d` limit.
    pub fn new(device: &wgpu::Device, size: (u32, u32)) -> Result<Self, Error> {
        if size.0 > device.limits().max_texture_dimension_2d
            || size.1 > device.limits().max_texture_dimension_2d
            || size.0 < 1
            || size.1 < 1
        {
            return Err(format!(
                "Texture target cannot be smaller than 1 or larger than {}px on either dimension (requested {} x {})",
                device.limits().max_texture_dimension_2d,
                size.0,
                size.1
            )
            .into());
        }
        // Row sizes for the readback buffer, including copy alignment padding.
        let buffer_dimensions = BufferDimensions::new(size.0 as usize, size.1 as usize);
        let size = wgpu::Extent3d {
            width: size.0,
            height: size.1,
            depth_or_array_layers: 1,
        };
        let texture_label = create_debug_label!("Render target texture");
        let format = wgpu::TextureFormat::Rgba8Unorm;
        // COPY_SRC so the rendered result can be copied into `buffer`.
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: texture_label.as_deref(),
            size,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
        });
        let buffer_label = create_debug_label!("Render target buffer");
        // Sized using the padded row length required by texture-to-buffer copies.
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: buffer_label.as_deref(),
            size: (buffer_dimensions.padded_bytes_per_row.get() as u64
                * buffer_dimensions.height as u64),
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });
        Ok(Self {
            size,
            texture: Arc::new(texture),
            format,
            buffer: Arc::new(buffer),
            buffer_dimensions,
        })
    }

    /// Captures the current contents of our texture buffer
    /// as an `RgbaImage`.
    ///
    /// Maps the readback buffer, strips the per-row copy padding, and
    /// optionally converts from premultiplied to straight alpha.
    /// Returns `None` if mapping fails or the image cannot be constructed.
    pub fn capture(
        &self,
        device: &wgpu::Device,
        premultiplied_alpha: bool,
    ) -> Option<image::RgbaImage> {
        // map_async completes via callback; relay the result over a channel
        // and block with `Maintain::Wait` until the device delivers it.
        let (sender, receiver) = std::sync::mpsc::channel();
        let buffer_slice = self.buffer.slice(..);
        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
            sender.send(result).unwrap();
        });
        device.poll(wgpu::Maintain::Wait);
        let result = receiver.recv().unwrap();
        match result {
            Ok(()) => {
                let map = buffer_slice.get_mapped_range();
                let mut buffer = Vec::with_capacity(
                    self.buffer_dimensions.height * self.buffer_dimensions.unpadded_bytes_per_row,
                );

                // Copy each row minus its alignment padding.
                for chunk in map.chunks(self.buffer_dimensions.padded_bytes_per_row.get() as usize)
                {
                    buffer
                        .extend_from_slice(&chunk[..self.buffer_dimensions.unpadded_bytes_per_row]);
                }

                // The image copied from the GPU uses premultiplied alpha, so
                // convert to straight alpha if requested by the user.
                if !premultiplied_alpha {
                    unmultiply_alpha_rgba(&mut buffer);
                }

                let image = image::RgbaImage::from_raw(self.size.width, self.size.height, buffer);
                // The mapped range must be dropped before unmapping.
                drop(map);
                self.buffer.unmap();
                image
            }
            Err(e) => {
                log::error!("Unknown error reading capture buffer: {:?}", e);
                None
            }
        }
    }
}
|
|
|
|
|
|
|
|
impl RenderTarget for TextureTarget {
    type Frame = TextureTargetFrame;

    /// Resizes by replacing the whole target (texture, buffer, and
    /// bookkeeping) with a freshly allocated one.
    fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
        *self =
            TextureTarget::new(device, (width, height)).expect("Unable to resize texture target");
    }

    fn format(&self) -> wgpu::TextureFormat {
        self.format
    }

    fn width(&self) -> u32 {
        self.size.width
    }

    fn height(&self) -> u32 {
        self.size.height
    }

    /// Always succeeds: wraps a fresh default view over the target texture.
    fn get_next_texture(&mut self) -> Result<Self::Frame, wgpu::SurfaceError> {
        Ok(TextureTargetFrame(
            self.texture.create_view(&Default::default()),
        ))
    }

    /// Submits the command buffers, appending a final copy of the rendered
    /// texture into the readback buffer for later `capture`.
    fn submit<I: IntoIterator<Item = wgpu::CommandBuffer>>(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        command_buffers: I,
        _frame: Self::Frame,
    ) {
        let label = create_debug_label!("Render target transfer encoder");
        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: label.as_deref(),
        });
        encoder.copy_texture_to_buffer(
            wgpu::ImageCopyTexture {
                texture: &self.texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::ImageCopyBuffer {
                buffer: &self.buffer,
                layout: wgpu::ImageDataLayout {
                    offset: 0,
                    // Rows are padded to the copy alignment requirement.
                    bytes_per_row: Some(self.buffer_dimensions.padded_bytes_per_row),
                    rows_per_image: None,
                },
            },
            self.size,
        );
        // The copy is chained after the caller's buffers so it observes
        // the completed rendering.
        queue.submit(command_buffers.into_iter().chain(Some(encoder.finish())));
    }
}
|