2020-01-02 03:33:21 +00:00
|
|
|
use ruffle_core::backend::audio::{
|
2022-08-18 19:21:03 +00:00
|
|
|
swf, AudioBackend, AudioMixer, AudioMixerProxy, DecodeError, RegisterError, SoundHandle,
|
2023-07-22 02:45:10 +00:00
|
|
|
SoundInstanceHandle, SoundStreamInfo, SoundTransform,
|
2020-01-02 03:33:21 +00:00
|
|
|
};
|
2022-05-10 01:01:13 +00:00
|
|
|
use ruffle_core::impl_audio_mixer_backend;
|
2020-04-23 22:04:07 +00:00
|
|
|
use ruffle_web_common::JsResult;
|
2023-07-04 17:38:45 +00:00
|
|
|
use std::cell::{Cell, RefCell};
|
|
|
|
use std::rc::Rc;
|
|
|
|
use std::sync::Arc;
|
2022-05-10 04:31:16 +00:00
|
|
|
use std::time::Duration;
|
2023-03-03 12:52:40 +00:00
|
|
|
use tracing_subscriber::layer::Layered;
|
|
|
|
use tracing_subscriber::Registry;
|
|
|
|
use tracing_wasm::WASMLayer;
|
2019-12-04 06:55:58 +00:00
|
|
|
use wasm_bindgen::{closure::Closure, prelude::*, JsCast};
|
2022-05-10 01:01:13 +00:00
|
|
|
use web_sys::AudioContext;
|
2019-08-26 23:38:37 +00:00
|
|
|
|
|
|
|
/// Audio backend that plays mixed audio through the browser's Web Audio API.
///
/// Output is double-buffered: two `Buffer`s ping-pong, each one refilling and
/// rescheduling itself from its `onended` callback while the other plays.
#[allow(dead_code)]
pub struct WebAudioBackend {
    /// Mixes all active sounds into interleaved stereo samples.
    mixer: AudioMixer,
    /// The Web Audio context that buffers are created in and scheduled on.
    context: AudioContext,
    /// The current length of both buffers, in frames (pairs of left/right samples).
    // Shared (via `Rc<Cell<_>>`) with both `Buffer`s so the adaptive sizing
    // algorithm in `Buffer::play` can grow/shrink it on the fly.
    buffer_size: Rc<Cell<u32>>,
    /// The two ping-ponging output buffers, created and started in `new`.
    buffers: Vec<Rc<RefCell<Buffer>>>,
    /// When the last submitted buffer is expected to play out completely, in seconds.
    time: Rc<Cell<f64>>,
    /// For how many seconds were we able to continuously fill the next buffer "at a sufficiently early time".
    probation_elapsed: Rc<Cell<f32>>,
    /// Tracing subscriber installed as the default while `Buffer::play` runs,
    /// so log output from the JS-invoked audio callbacks is captured.
    log_subscriber: Arc<Layered<WASMLayer, Registry>>,
}
|
|
|
|
|
|
|
|
impl WebAudioBackend {
    /// These govern the adaptive buffer size algorithm, all are in number of frames (pairs of samples).
    /// They must all be integer powers of 2 (due to how the algorithm works:
    /// the size is only ever doubled or halved, see `Buffer::play`).
    const INITIAL_BUFFER_SIZE: u32 = 2048; // 46.44 ms at 44.1 kHz
    const MIN_BUFFER_SIZE: u32 = 1024; // 23.22 ms at 44.1 kHz
    const MAX_BUFFER_SIZE: u32 = 16384; // 371.52 ms at 44.1 kHz

    /// Buffer size will not be increased until this many seconds have elapsed after startup,
    /// to account for any initialization (shape tessellation, WASM JIT, etc.) hitches.
    const WARMUP_PERIOD: f32 = 2.0;

    /// For how long we need to fill every single buffer "quickly enough" in order to decrease buffer size.
    /// Measured in seconds. A higher value is more conservative.
    const PROBATION_LENGTH: f32 = 10.0;

    /// The limit of playout ratio (progress) when filling the next buffer, under which it is
    /// considered "quick". Must be in 0..1, and less than `0.5 * NORMAL_PROGRESS_RANGE_MAX`.
    const NORMAL_PROGRESS_RANGE_MIN: f64 = 0.25;

    /// The limit of playout ratio (progress) when filling the next buffer, over which buffer size
    /// is increased immediately. Must be in 0..1, and greater than `2 * NORMAL_PROGRESS_RANGE_MIN`.
    const NORMAL_PROGRESS_RANGE_MAX: f64 = 0.75;

    /// Creates the backend: a fresh `AudioContext`, a stereo mixer at the
    /// context's native sample rate, and the two ping-ponging output buffers,
    /// which are started immediately.
    ///
    /// # Errors
    /// Returns a `JsError` if the `AudioContext` or either buffer cannot be
    /// created, or if initial playback cannot be scheduled.
    pub fn new(log_subscriber: Arc<Layered<WASMLayer, Registry>>) -> Result<Self, JsError> {
        let context = AudioContext::new().into_js_result()?;
        // Read before `context` is moved into `audio`; the mixer must run at
        // the context's own sample rate so no resampling happens on output.
        let sample_rate = context.sample_rate();
        let mut audio = Self {
            context,
            mixer: AudioMixer::new(2, sample_rate as u32),
            buffer_size: Rc::new(Cell::new(Self::INITIAL_BUFFER_SIZE)),
            buffers: Vec::with_capacity(2),
            time: Rc::new(Cell::new(0.0)),
            probation_elapsed: Rc::new(Cell::new(0.0)),
            log_subscriber,
        };

        // Create and start the audio buffers.
        // These buffers ping-pong as the audio stream plays.
        for _ in 0..2 {
            let buffer = Buffer::new(&audio)?;
            buffer.borrow_mut().play()?;
            audio.buffers.push(buffer);
        }

        Ok(audio)
    }

    /// Returns the JavaScript AudioContext.
    pub fn audio_context(&self) -> &AudioContext {
        &self.context
    }
}
|
|
|
|
|
|
|
|
impl AudioBackend for WebAudioBackend {
    // Delegates the bulk of the `AudioBackend` trait (sound registration,
    // starting/stopping instances, transforms, etc.) to the shared
    // `AudioMixer` stored in the `mixer` field.
    impl_audio_mixer_backend!(mixer);

    /// Resumes the `AudioContext`. Errors are ignored: there is nothing
    /// useful to do here if the context refuses to resume.
    fn play(&mut self) {
        let _ = self.context.resume();
    }

    /// Suspends the `AudioContext`, pausing audio output. Errors are ignored.
    fn pause(&mut self) {
        let _ = self.context.suspend();
    }

    /// Playback position advances one buffer at a time, so the resolution is
    /// the duration of one buffer (at the current adaptive `buffer_size`) at
    /// the context's sample rate.
    fn position_resolution(&self) -> Option<Duration> {
        Some(Duration::from_secs_f64(
            f64::from(self.buffer_size.get()) / f64::from(self.context.sample_rate()),
        ))
    }
}
|
2020-09-18 22:52:35 +00:00
|
|
|
|
2022-05-10 01:01:13 +00:00
|
|
|
impl Drop for WebAudioBackend {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
let _ = self.context.close();
|
2019-09-19 07:21:22 +00:00
|
|
|
}
|
2022-05-10 01:01:13 +00:00
|
|
|
}
|
2019-09-27 17:23:53 +00:00
|
|
|
|
2022-05-10 01:01:13 +00:00
|
|
|
/// One half of the double-buffered audio output.
///
/// Each `Buffer` mixes audio into its own JS `AudioBuffer`, schedules it for
/// playback, and refills/reschedules itself from the node's `onended` callback.
struct Buffer {
    /// The Web Audio context this buffer plays into.
    context: AudioContext,
    /// Proxy used to pull freshly mixed samples out of the shared `AudioMixer`.
    mixer_proxy: AudioMixerProxy,
    /// Current buffer length in frames; shared with the backend and the other buffer.
    buffer_size: Rc<Cell<u32>>,
    /// Interleaved (left/right) mix output; holds `2 * buffer_size` samples.
    audio_buffer: Vec<f32>,
    /// The JS-side buffer the interleaved samples are copied into before playback.
    js_buffer: web_sys::AudioBuffer,
    /// The most recently scheduled source node, if any; kept alive so its
    /// `onended` callback can be detached on drop.
    audio_node: Option<web_sys::AudioBufferSourceNode>,
    /// Callback invoked when playback of this buffer ends; refills and replays it.
    on_ended_handler: Closure<dyn FnMut()>,
    /// When the last submitted buffer is expected to finish playing, in seconds.
    time: Rc<Cell<f64>>,
    /// Seconds of continuous "quick" fills; compared against
    /// `WebAudioBackend::PROBATION_LENGTH` to decide buffer-size decreases.
    probation_elapsed: Rc<Cell<f32>>,
    /// Tracing subscriber installed as the default while `play` runs.
    log_subscriber: Arc<Layered<WASMLayer, Registry>>,
}
|
2021-01-23 02:03:59 +00:00
|
|
|
|
2022-05-10 01:01:13 +00:00
|
|
|
impl Buffer {
    /// Creates a new buffer wired up to the given backend's context, mixer,
    /// and shared adaptive-sizing state, and installs its self-rescheduling
    /// `onended` handler.
    ///
    /// # Errors
    /// Returns a `JsError` if the JS-side `AudioBuffer` cannot be created.
    fn new(audio: &WebAudioBackend) -> Result<Rc<RefCell<Self>>, JsError> {
        let sample_rate = audio.context.sample_rate();
        let buffer = Rc::new(RefCell::new(Self {
            context: audio.context.clone(),
            mixer_proxy: audio.mixer.proxy(),
            buffer_size: audio.buffer_size.clone(),
            // Interleaved stereo, so twice as many samples as frames.
            audio_buffer: vec![0.0; 2 * WebAudioBackend::INITIAL_BUFFER_SIZE as usize],
            js_buffer: audio
                .context
                .create_buffer(2, WebAudioBackend::INITIAL_BUFFER_SIZE, sample_rate)
                .into_js_result()?,
            audio_node: None,
            // Placeholder; the real handler needs an `Rc` to this very
            // buffer, so it is swapped in right below.
            on_ended_handler: Closure::new(|| {}),
            time: audio.time.clone(),
            probation_elapsed: audio.probation_elapsed.clone(),
            log_subscriber: audio.log_subscriber.clone(),
        }));

        // Swap in the onended handler.
        let buffer_handle = buffer.clone();
        buffer.borrow_mut().on_ended_handler = Closure::new(move || {
            // Refill and schedule the buffer for playback.
            let _ = buffer_handle.borrow_mut().play();
        });

        Ok(buffer)
    }

    /// Fills this buffer with freshly mixed audio and schedules it to start
    /// right when the previously scheduled buffer ends.
    ///
    /// Also runs the adaptive buffer-size algorithm: based on how far the
    /// other buffer has played out by the time this one is refilled, the
    /// shared `buffer_size` is doubled (late fills) or halved (a sustained
    /// run of early fills), clamped to
    /// `MIN_BUFFER_SIZE..=MAX_BUFFER_SIZE`.
    ///
    /// # Errors
    /// Returns a `JsError` if any Web Audio API call fails.
    fn play(&mut self) -> Result<(), JsError> {
        // This runs as a JS `onended` callback, outside any tracing scope,
        // so install the subscriber for the duration of the call.
        let _subscriber = tracing::subscriber::set_default(self.log_subscriber.clone());

        let time_left = self.time.get() - self.context.current_time();
        // Duration of one buffer, in seconds. `mut` because it is
        // recomputed below if `buffer_size` changes.
        let mut buffer_timestep =
            f64::from(self.buffer_size.get()) / f64::from(self.context.sample_rate());

        // How far along the other buffer is in playing out right now:
        // ~0: it has just started playing, we are well within time
        // 0.25 .. 0.75: "optimal range"
        // ~1: we are just barely keeping up with feeding the output
        // >1: we are falling behind, audio stutters
        let progress = (buffer_timestep - time_left) / buffer_timestep;
        tracing::trace!(
            "Audio buffer progress when filling the next one: {}%",
            progress * 100.0
        );

        if progress < WebAudioBackend::NORMAL_PROGRESS_RANGE_MIN {
            // This fill is considered quick, let's count it.
            self.probation_elapsed
                .set(self.probation_elapsed.get() + buffer_timestep as f32);
        } else if progress < WebAudioBackend::NORMAL_PROGRESS_RANGE_MAX {
            // This fill is in the "normal" range, only resetting the probation time.
            self.probation_elapsed.set(0.0);
        } else {
            // This fill is considered slow (maybe even too slow), increasing the buffer size.
            self.probation_elapsed.set(0.0);
            if progress >= 1.0 {
                tracing::debug!("Audio underrun detected!");
            }
            if self.time.get() as f32 > WebAudioBackend::WARMUP_PERIOD {
                if self.buffer_size.get() < WebAudioBackend::MAX_BUFFER_SIZE {
                    self.buffer_size.set(self.buffer_size.get() * 2);
                    tracing::debug!(
                        "Increased audio buffer size to {} frames",
                        self.buffer_size.get()
                    );
                } else {
                    tracing::debug!("Not increasing audio buffer size, already at max size");
                }
            } else {
                tracing::debug!(
                    "Not increasing audio buffer size, still in warmup period (at {} of {} sec)",
                    self.time.get(),
                    WebAudioBackend::WARMUP_PERIOD
                );
            }
        }

        // If enough quick fills happened, we decrease the buffer size.
        if self.probation_elapsed.get() > WebAudioBackend::PROBATION_LENGTH
            && self.buffer_size.get() > WebAudioBackend::MIN_BUFFER_SIZE
        {
            self.buffer_size.set(self.buffer_size.get() / 2);
            tracing::debug!(
                "Decreased audio buffer size to {} frames",
                self.buffer_size.get()
            );
            self.probation_elapsed.set(0.0);
        }

        // In case buffer_size changed above (or in the latest call in the other instance),
        // we need to recalculate/recreate/resize a couple of things that depend on it.
        if self.js_buffer.length() != self.buffer_size.get() {
            tracing::trace!("Recreating JS side buffer with new length");
            buffer_timestep =
                f64::from(self.buffer_size.get()) / f64::from(self.context.sample_rate());
            self.js_buffer = self
                .context
                .create_buffer(2, self.buffer_size.get(), self.context.sample_rate())
                .into_js_result()?;
            self.audio_buffer
                .resize(2 * self.buffer_size.get() as usize, 0.0);
        }

        // Mix new audio into the output buffer and copy to JS.
        self.mixer_proxy.mix(&mut self.audio_buffer);
        copy_to_audio_buffer_interleaved(&self.js_buffer, &self.audio_buffer);

        // Create the audio node to play back the audio buffer.
        // (An `AudioBufferSourceNode` can only be started once, so a fresh
        // node is needed for every playback.)
        let audio_node = self.context.create_buffer_source().into_js_result()?;
        audio_node.set_buffer(Some(&self.js_buffer));
        audio_node
            .connect_with_audio_node(&self.context.destination())
            .into_js_result()?;
        audio_node.set_onended(Some(self.on_ended_handler.as_ref().unchecked_ref()));

        // Sanity: ensure our player time is not in the past. This can happen due to underruns.
        self.time
            .set(f64::max(self.time.get(), self.context.current_time()));

        // Schedule this buffer for playback and advance the player time.
        audio_node
            .start_with_when(self.time.get())
            .into_js_result()?;
        self.time.set(self.time.get() + buffer_timestep);

        self.audio_node = Some(audio_node);
        Ok(())
    }
}
|
2021-10-16 08:25:31 +00:00
|
|
|
|
2022-05-10 01:01:13 +00:00
|
|
|
impl Drop for Buffer {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(audio_node) = self.audio_node.take() {
|
|
|
|
audio_node.set_onended(None);
|
|
|
|
}
|
2021-10-16 08:25:31 +00:00
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2023-02-27 13:17:37 +00:00
|
|
|
// JS helper functions bundled alongside the WASM module.
#[wasm_bindgen(raw_module = "./ruffle-imports")]
extern "C" {
    // Imported JS method to copy interleaved audio data into an `AudioBuffer`.
    // Takes the interleaved stereo sample slice produced by the mixer and
    // writes it into the given JS-side buffer's channels.
    #[wasm_bindgen(js_name = "copyToAudioBufferInterleaved")]
    fn copy_to_audio_buffer_interleaved(
        audio_buffer: &web_sys::AudioBuffer,
        interleaved_data: &[f32],
    );
}
|