2021-09-07 20:25:56 +00:00
|
|
|
use bytemuck::Pod;
|
2022-09-07 04:32:15 +00:00
|
|
|
use ouroboros::self_referencing;
|
|
|
|
use std::cell::RefCell;
|
2022-03-13 22:57:06 +00:00
|
|
|
use std::{marker::PhantomData, mem};
|
2022-09-07 04:32:15 +00:00
|
|
|
use typed_arena::Arena;
|
2021-09-07 20:25:56 +00:00
|
|
|
use wgpu::util::StagingBelt;
|
|
|
|
|
|
|
|
/// A simple chunked bump allocator for managing dynamic uniforms that change per-draw.
///
/// Each draw call may use `UniformBuffer::write_uniforms` to queue
/// the upload of uniform data to the GPU.
pub struct UniformBuffer<'a, T: Pod> {
    // Shared backing storage: owns the GPU blocks, the arena allocator,
    // and the staging belt used for uploads.
    buffers: &'a BufferStorage<T>,
    // Index of the block currently being filled with uniform data.
    cur_block: usize,
    // Byte offset within the current block where the next write will land.
    cur_offset: u32,
}
|
|
|
|
|
2022-09-07 04:32:15 +00:00
|
|
|
/// Long-lived storage for the uniform blocks, kept across frames.
///
/// This is a self-referencing struct (via `ouroboros`): `allocator` borrows
/// from `arena`, so blocks allocated in the arena can be handed out as
/// references that live as long as this storage does.
#[self_referencing]
pub struct BufferStorage<T: Pod> {
    // Ties this storage to uniform type `T` without storing a value of it.
    phantom: PhantomData<T>,

    // Owns every allocated `Block`; arena allocations are only freed when
    // the whole storage is dropped.
    arena: Arena<Block>,

    // Bookkeeping over the arena-allocated blocks; borrows `arena`
    // (ouroboros supplies the `'this` lifetime).
    #[borrows(arena)]
    #[not_covariant]
    allocator: RefCell<Allocator<'this>>,

    // Staging belt used to copy uniform data into the block buffers.
    staging_belt: RefCell<StagingBelt>,

    // `UNIFORMS_SIZE` rounded up to the device's uniform alignment;
    // the per-draw stride within a block.
    aligned_uniforms_size: u32,
}
|
2022-09-07 04:32:15 +00:00
|
|
|
/// Tracks the blocks that have been allocated from the arena.
struct Allocator<'a> {
    // The arena that owns the blocks; allocations borrow from it for `'a`.
    arena: &'a Arena<Block>,
    // Every block allocated so far, in allocation order.
    blocks: Vec<&'a Block>,
}
|
2021-09-07 20:25:56 +00:00
|
|
|
|
2022-09-04 19:19:16 +00:00
|
|
|
impl<T: Pod> BufferStorage<T> {
|
2021-09-07 20:25:56 +00:00
|
|
|
/// The size of each block.
|
|
|
|
/// Uniforms are copied into each block until it reaches capacity, at which point a new
|
|
|
|
/// block will be allocated.
|
2022-09-04 19:19:16 +00:00
|
|
|
pub const BLOCK_SIZE: u32 = 65536;
|
2021-09-07 20:25:56 +00:00
|
|
|
|
|
|
|
/// The uniform data size for a single draw call.
|
2022-09-04 19:19:16 +00:00
|
|
|
pub const UNIFORMS_SIZE: u64 = mem::size_of::<T>() as u64;
|
2021-09-07 20:25:56 +00:00
|
|
|
|
2022-09-07 04:32:15 +00:00
|
|
|
pub fn from_alignment(uniform_alignment: u32) -> Self {
|
2021-09-07 20:25:56 +00:00
|
|
|
// Calculate alignment of uniforms.
|
|
|
|
let align_mask = uniform_alignment - 1;
|
|
|
|
let aligned_uniforms_size = (Self::UNIFORMS_SIZE as u32 + align_mask) & !align_mask;
|
2022-09-07 04:32:15 +00:00
|
|
|
BufferStorageBuilder {
|
|
|
|
arena: Arena::with_capacity(8),
|
|
|
|
allocator_builder: |arena| {
|
|
|
|
RefCell::new(Allocator {
|
|
|
|
arena,
|
|
|
|
blocks: Vec::with_capacity(8),
|
|
|
|
})
|
|
|
|
},
|
|
|
|
staging_belt: RefCell::new(StagingBelt::new(u64::from(Self::BLOCK_SIZE) / 2)),
|
2021-09-07 20:25:56 +00:00
|
|
|
aligned_uniforms_size,
|
2022-09-07 04:32:15 +00:00
|
|
|
phantom: PhantomData,
|
2021-09-07 20:25:56 +00:00
|
|
|
}
|
2022-09-07 04:32:15 +00:00
|
|
|
.build()
|
2021-09-07 20:25:56 +00:00
|
|
|
}
|
|
|
|
|
2022-09-04 19:19:16 +00:00
|
|
|
/// Adds a newly allocated buffer to the block list, and returns it.
|
2022-09-07 04:32:15 +00:00
|
|
|
pub fn allocate_block(&self, device: &wgpu::Device, layout: &wgpu::BindGroupLayout) {
|
2022-09-04 19:19:16 +00:00
|
|
|
let buffer_label = create_debug_label!("Dynamic buffer");
|
|
|
|
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
|
|
|
label: buffer_label.as_deref(),
|
|
|
|
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
|
|
|
size: Self::BLOCK_SIZE.into(),
|
|
|
|
mapped_at_creation: false,
|
|
|
|
});
|
|
|
|
|
|
|
|
let bind_group_label = create_debug_label!("Dynamic buffer bind group");
|
|
|
|
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
|
|
|
label: bind_group_label.as_deref(),
|
|
|
|
layout: &layout,
|
|
|
|
entries: &[wgpu::BindGroupEntry {
|
|
|
|
binding: 0,
|
|
|
|
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
|
|
|
buffer: &buffer,
|
|
|
|
offset: 0,
|
|
|
|
size: wgpu::BufferSize::new(std::mem::size_of::<T>() as u64),
|
|
|
|
}),
|
|
|
|
}],
|
|
|
|
});
|
|
|
|
|
2022-09-07 04:32:15 +00:00
|
|
|
self.with_allocator(|alloc| {
|
|
|
|
let mut alloc = alloc.borrow_mut();
|
|
|
|
let block = alloc.arena.alloc(Block { buffer, bind_group });
|
|
|
|
alloc.blocks.push(block);
|
|
|
|
});
|
2022-09-04 19:19:16 +00:00
|
|
|
}
|
2022-09-07 11:20:28 +00:00
|
|
|
|
|
|
|
pub fn recall(&mut self) {
|
2022-09-07 04:32:15 +00:00
|
|
|
self.with_staging_belt(|belt| belt.borrow_mut().recall());
|
2022-09-07 11:20:28 +00:00
|
|
|
}
|
2022-09-04 19:19:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
impl<'a, T: Pod> UniformBuffer<'a, T> {
    /// Creates a new `UniformBuffer` with the given uniform layout.
    ///
    /// Takes `&mut` to the storage so only one `UniformBuffer` can be
    /// writing into it at a time; writing starts at block 0, offset 0.
    pub fn new(buffers: &'a mut BufferStorage<T>) -> Self {
        Self {
            buffers,
            cur_block: 0,
            cur_offset: 0,
        }
    }

    /// Enqueue `data` for upload into the given command encoder, and set the bind group on `render_pass`
    /// to use the uniform data.
    ///
    /// Each call consumes `aligned_uniforms_size` bytes of the current block,
    /// allocating a fresh block when the current one cannot fit another draw.
    pub fn write_uniforms<'b>(
        &mut self,
        device: &wgpu::Device,
        layout: &wgpu::BindGroupLayout,
        command_encoder: &mut wgpu::CommandEncoder,
        render_pass: &mut wgpu::RenderPass<'b>,
        bind_group_index: u32,
        data: &T,
    ) where
        'a: 'b,
    {
        // Allocate a new block if we've exceeded our capacity.
        if self.cur_block
            >= self
                .buffers
                .with_allocator(|alloc| alloc.borrow().blocks.len())
        {
            self.buffers.allocate_block(device, layout);
        }

        // NOTE(review): the block reference escapes the `with_allocator`
        // closure at lifetime `'a`. This presumably relies on blocks being
        // arena-allocated (never freed or moved while the storage lives) —
        // confirm against the ouroboros covariance setup.
        let block: &'a Block = self
            .buffers
            .with_allocator(|alloc| alloc.borrow().blocks[self.cur_block]);

        // Copy the data into the buffer via the staging belt.
        // `write_buffer` returns a mapped view of UNIFORMS_SIZE bytes at
        // `cur_offset` in the block's buffer; we memcpy the uniforms into it.
        self.buffers.with_staging_belt(|belt| {
            belt.borrow_mut()
                .write_buffer(
                    command_encoder,
                    &block.buffer,
                    self.cur_offset.into(),
                    BufferStorage::<T>::UNIFORMS_SIZE.try_into().unwrap(),
                    device,
                )
                .copy_from_slice(bytemuck::cast_slice(std::slice::from_ref(data)));
        });

        // Set the bind group to the final uniform location.
        // `cur_offset` is passed as a dynamic offset into the block's buffer.
        render_pass.set_bind_group(bind_group_index, &block.bind_group, &[self.cur_offset]);

        // Advance offset by the aligned per-draw stride.
        self.cur_offset += self.buffers.borrow_aligned_uniforms_size();
        // Advance to next buffer if we are out of room in this buffer.
        if BufferStorage::<T>::BLOCK_SIZE - self.cur_offset
            < *self.buffers.borrow_aligned_uniforms_size()
        {
            self.cur_block += 1;
            self.cur_offset = 0;
        }
    }

    /// Should be called at the end of a frame.
    ///
    /// Finishes the staging belt so the queued uploads are flushed when the
    /// command encoder is submitted.
    pub fn finish(self) {
        self.buffers
            .with_staging_belt(|belt| belt.borrow_mut().finish());
    }
}
|
|
|
|
|
|
|
|
/// A block of GPU memory that will contain our uniforms.
#[derive(Debug)]
struct Block {
    // The GPU buffer holding the uniform data (UNIFORM | COPY_DST usage).
    buffer: wgpu::Buffer,
    // Bind group over `buffer`; bound with a dynamic offset per draw in
    // `UniformBuffer::write_uniforms`.
    bind_group: wgpu::BindGroup,
}
|