2019-08-26 23:38:37 +00:00
|
|
|
use fnv::FnvHashMap;
|
|
|
|
use generational_arena::Arena;
|
2020-01-02 03:33:21 +00:00
|
|
|
use ruffle_core::backend::audio::{
|
2021-05-03 01:50:13 +00:00
|
|
|
decoders::{AdpcmDecoder, NellymoserDecoder},
|
2021-01-26 08:50:19 +00:00
|
|
|
swf::{self, AudioCompression},
|
|
|
|
AudioBackend, PreloadStreamHandle, SoundHandle, SoundInstanceHandle, SoundTransform,
|
2020-01-02 03:33:21 +00:00
|
|
|
};
|
2020-04-23 22:04:07 +00:00
|
|
|
use ruffle_web_common::JsResult;
|
2019-08-26 23:38:37 +00:00
|
|
|
use std::cell::{Cell, RefCell};
|
|
|
|
use std::rc::Rc;
|
2019-12-04 06:55:58 +00:00
|
|
|
use wasm_bindgen::{closure::Closure, prelude::*, JsCast};
|
2021-01-24 08:16:07 +00:00
|
|
|
use web_sys::{AudioContext, GainNode};
|
2019-08-26 23:38:37 +00:00
|
|
|
|
|
|
|
pub struct WebAudioBackend {
    /// The browser's Web Audio context; every node in this backend is created on it.
    context: AudioContext,
    /// All registered sounds (event sounds and finalized stream sounds).
    sounds: Arena<Sound>,
    /// Scratch buffer for decoded left-channel samples, reused across
    /// decompressions to avoid reallocating.
    left_samples: Vec<f32>,
    /// Scratch buffer for decoded right-channel samples (stereo sounds only).
    right_samples: Vec<f32>,
    // NOTE(review): presumably the audio output position in seconds
    // (same clock as `AudioContext.currentTime`); initialized to 0.0 in
    // `new` — confirm usage against the rest of the file.
    output_time: f64,
    /// The movie frame rate; initialized to 1.0 in `new`.
    frame_rate: f64,
    /// The lowest sample rate this browser's `AudioContext` accepts when
    /// creating buffers. Probed in `new`; some browsers reject very low rates.
    min_sample_rate: u16,
    /// Stream sounds currently being preloaded, keyed by their preload handle.
    preload_stream_data: FnvHashMap<PreloadStreamHandle, StreamData>,
    /// Source of fresh stream ids for `PreloadStreamHandle`s.
    next_stream_id: u32,
}
|
|
|
|
|
|
|
|
thread_local! {
    /// The set of actively playing sound instances. Thread-local so the JS
    /// event closures (`onended`, `onaudioprocess`) can access it.
    static SOUND_INSTANCES: RefCell<Arena<SoundInstance>> = RefCell::new(Arena::new());
    /// Count of sounds still being decoded asynchronously (incremented before
    /// `decodeAudioData`, decremented in its success/error callbacks).
    static NUM_SOUNDS_LOADING: Cell<u32> = Cell::new(0);
}
|
|
|
|
|
2019-10-29 01:12:44 +00:00
|
|
|
/// Accumulated state for a stream sound while it is being preloaded,
/// built up block-by-block until the stream is finalized into a `Sound`.
#[derive(Clone)]
struct StreamData {
    /// The encoding parameters of the stream's audio.
    format: swf::SoundFormat,
    /// The raw (still encoded) audio data gathered so far.
    audio_data: Vec<u8>,
    /// Total number of sample frames gathered so far.
    num_sample_frames: u32,
    // NOTE(review): presumably the per-frame sample count declared by the
    // stream head tag — confirm against the preload code.
    samples_per_block: u32,
    /// Number of leading sample frames to skip (e.g. MP3 encoder delay).
    skip_sample_frames: u16,
    /// Byte offsets of each ADPCM block inside `audio_data`. ADPCM stream
    /// blocks each restate the decoder header, so block boundaries must be
    /// remembered for decoding.
    adpcm_block_offsets: Vec<usize>,

    /// List of stream segments. Contains the frame they start on and the starting sample.
    /// Guaranteed to be in frame order.
    stream_segments: Vec<(u16, u32)>,

    /// The last frame we received a `StreamSoundBlock` from.
    last_clip_frame: u16,

    /// The number of audio samples for use in future animation frames.
    ///
    /// Only used in MP3 encoding to properly handle gaps in the audio track.
    mp3_samples_buffered: i32,
}
|
|
|
|
|
|
|
|
/// Shared, mutable handle to a JS `AudioBuffer`. Shared so the async MP3
/// decode callback can swap in the decoded buffer after the fact.
type AudioBufferPtr = Rc<RefCell<web_sys::AudioBuffer>>;
|
|
|
|
|
|
|
|
// A sound is stored either as a pre-decoded JS AudioBuffer, or as raw bytes
// decoded on the fly through a ScriptProcessorNode.
#[allow(dead_code)]
enum SoundSource {
    // Pre-decoded audio buffer.
    AudioBuffer(AudioBufferPtr),

    // Decode the audio data on the fly from a byte stream.
    Decoder(Vec<u8>),
}
|
|
|
|
|
2019-09-19 06:40:20 +00:00
|
|
|
#[allow(dead_code)]
struct Sound {
    /// The encoding parameters of this sound.
    format: swf::SoundFormat,

    /// Where the audio comes from: a pre-decoded buffer, or raw bytes
    /// decoded at playback time.
    source: SoundSource,

    /// Number of samples in this audio.
    /// This may be shorter than the actual length of the audio data to allow for seamless looping.
    /// For example, MP3 encoder adds gaps from encoder delay.
    num_sample_frames: u32,

    /// Number of samples to skip encoder delay.
    skip_sample_frames: u16,

    /// If this is a stream sound, the frame numbers and sample counts for each segment of the stream.
    stream_segments: Vec<(u16, u32)>,

    /// The length of the sound data as encoded in the SWF.
    size: u32,
}
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
/// A boxed iterator yielding decoded `[left, right]` sample frames as `i16`.
type Decoder = Box<dyn Iterator<Item = [i16; 2]>>;
|
2019-08-26 23:38:37 +00:00
|
|
|
|
2019-09-27 19:25:22 +00:00
|
|
|
/// An actively playing instance of a sound.
/// This sound can be either an event sound (`StartSound`) or
/// a stream sound (`SoundStreamBlock`).
struct SoundInstance {
    /// Handle to the sound clip.
    #[allow(dead_code)]
    handle: Option<SoundHandle>,

    /// Format of the sound.
    format: swf::SoundFormat,

    /// On web, sounds can be played via different methods:
    /// either decoded on the fly with a Decoder, or pre-decoded
    /// and played with an AudioBufferSourceNode.
    instance_type: SoundInstanceType,

    /// The time in seconds that this buffer started playing.
    /// This time uses the same origin as `AudioContext.currentTime`.
    start_time: f64,

    /// The starting point of the sound data in seconds.
    /// `0.0` means the beginning of the sound.
    loop_start: f64,

    /// The ending point of the sound data in seconds.
    /// `f64::MAX` if no end point is specified.
    loop_end: f64,

    /// The number of times the sound data will loop.
    /// `1` means the sound plays once.
    num_loops: u16,
}
|
|
|
|
|
2020-10-26 02:34:52 +00:00
|
|
|
/// The Drop impl ensures that the sound is stopped and remove from the audio context,
|
|
|
|
/// and any event listeners are removed.
|
|
|
|
impl Drop for SoundInstance {
|
|
|
|
fn drop(&mut self) {
|
2021-01-24 08:16:07 +00:00
|
|
|
if let SoundInstanceType::AudioBuffer(instance) = &self.instance_type {
|
|
|
|
let _ = instance.buffer_source_node.set_onended(None);
|
|
|
|
let _ = instance.node.disconnect();
|
2020-10-26 02:34:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
/// How a `SoundInstance` produces audio: decoded on the fly by pulling
/// samples from an iterator, or played from a pre-decoded buffer graph.
#[allow(dead_code)]
enum SoundInstanceType {
    /// Samples are pulled from this decoder as the sound plays.
    Decoder(Decoder),
    /// Played via an `AudioBufferSourceNode` and associated nodes.
    AudioBuffer(AudioBufferInstance),
}
|
|
|
|
|
|
|
|
/// A sound instance that is playing from an AudioBufferSourceNode.
struct AudioBufferInstance {
    /// The node that is connected to the output.
    node: web_sys::AudioNode,

    /// The lazily created gain nodes implementing this instance's
    /// `SoundTransform` (volume / channel mixing), if any.
    sound_transform_nodes: SoundTransformNodes,

    /// The audio node with envelopes applied.
    envelope_node: web_sys::AudioNode,

    /// Whether the output of `envelope_node` is mono or stereo.
    envelope_is_stereo: bool,

    /// The buffer node containing the audio data.
    /// This is often the same as `envelope_node`, but will be different
    /// if there is a custom envelope on this sound.
    buffer_source_node: web_sys::AudioBufferSourceNode,
}
|
|
|
|
|
|
|
|
impl AudioBufferInstance {
    /// Applies a `SoundTransform` to this playing instance.
    ///
    /// Gain nodes are created lazily: an untransformed sound has none, a
    /// plain volume change gets a single `GainNode`, and any cross-channel
    /// mixing (or differing left/right volume) gets the full 4-gain matrix.
    /// Once upgraded, the node setup is never downgraded.
    #[allow(clippy::float_cmp)]
    fn set_transform(&mut self, context: &AudioContext, transform: &SoundTransform) {
        // A "full" transform is needed for any cross-channel mixing, or
        // when the left and right volumes differ.
        let is_full_transform = transform.left_to_right != 0.0
            || transform.right_to_left != 0.0
            || transform.left_to_left != transform.right_to_right;

        // Lazily instantiate gain nodes, depending on the type of transform.
        match &self.sound_transform_nodes {
            SoundTransformNodes::None => {
                if is_full_transform {
                    let _ = self.create_full_transform(context);
                } else if transform.left_to_left != 1.0 || transform.right_to_right != 1.0 {
                    let _ = self.create_volume_transform(context);
                }
            }
            SoundTransformNodes::Volume { .. } => {
                if is_full_transform {
                    let _ = self.create_full_transform(context);
                }
            }
            // Already fully wired; nothing more to create.
            SoundTransformNodes::Transform { .. } => (),
        }

        // Write the transform values into whatever gain nodes now exist.
        match &self.sound_transform_nodes {
            SoundTransformNodes::None => (),
            SoundTransformNodes::Volume { gain } => {
                // Assumes right_to_right is matching.
                gain.gain().set_value(transform.left_to_left);
            }
            SoundTransformNodes::Transform {
                left_to_left_gain,
                left_to_right_gain,
                right_to_left_gain,
                right_to_right_gain,
            } => {
                left_to_left_gain.gain().set_value(transform.left_to_left);
                left_to_right_gain.gain().set_value(transform.left_to_right);
                right_to_left_gain.gain().set_value(transform.right_to_left);
                right_to_right_gain
                    .gain()
                    .set_value(transform.right_to_right);
            }
        }
    }

    /// Adds a gain node to this sound instance, allowing the volume to be adjusted.
    fn create_volume_transform(
        &mut self,
        context: &AudioContext,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // Create the gain node to control the volume.
        let gain = context.create_gain().into_js_result()?;

        // Wire up the nodes: envelope output -> gain -> destination.
        self.node.disconnect().warn_on_error();
        self.envelope_node.disconnect().warn_on_error();
        self.envelope_node
            .connect_with_audio_node(&gain)
            .into_js_result()?;

        gain.connect_with_audio_node(&context.destination())
            .warn_on_error();

        // The gain node is now this instance's output node.
        self.node = gain.clone().into();
        self.sound_transform_nodes = SoundTransformNodes::Volume { gain };
        Ok(())
    }

    /// Adds a bunch of gain nodes to this sound instance, allowing a SoundTransform
    /// to be applied to it.
    ///
    /// The graph becomes: envelope -> splitter -> 4 gains -> merger -> destination.
    fn create_full_transform(
        &mut self,
        context: &AudioContext,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // Split the left and right channels.
        let splitter = context
            .create_channel_splitter_with_number_of_outputs(2)
            .into_js_result()?;

        // Create the envelope gain nodes for the left and right channels.
        let left_to_left_gain = context.create_gain().into_js_result()?;
        let left_to_right_gain = context.create_gain().into_js_result()?;
        let right_to_left_gain = context.create_gain().into_js_result()?;
        let right_to_right_gain = context.create_gain().into_js_result()?;

        let merger: web_sys::AudioNode = context
            .create_channel_merger_with_number_of_inputs(2)
            .into_js_result()?
            .into();

        // Wire up the nodes.
        // Note that for mono tracks, we want to use channel 0 (left) for both the left and right.
        self.node.disconnect().warn_on_error();
        self.envelope_node.disconnect().warn_on_error();
        self.envelope_node
            .connect_with_audio_node(&splitter)
            .into_js_result()?;
        splitter
            .connect_with_audio_node_and_output(&left_to_left_gain, 0)
            .into_js_result()?;
        splitter
            .connect_with_audio_node_and_output(&left_to_right_gain, 0)
            .into_js_result()?;
        splitter
            .connect_with_audio_node_and_output(
                &right_to_left_gain,
                if self.envelope_is_stereo { 1 } else { 0 },
            )
            .into_js_result()?;
        splitter
            .connect_with_audio_node_and_output(
                &right_to_right_gain,
                if self.envelope_is_stereo { 1 } else { 0 },
            )
            .into_js_result()?;

        // Merge: *_to_left gains feed merger input 0, *_to_right input 1.
        left_to_left_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 0)
            .into_js_result()?;
        left_to_right_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 1)
            .into_js_result()?;
        right_to_left_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 0)
            .into_js_result()?;
        right_to_right_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 1)
            .into_js_result()?;

        merger
            .connect_with_audio_node(&context.destination())
            .warn_on_error();

        self.node = merger;
        // The 2-input merger's output is stereo from here on.
        self.envelope_is_stereo = true;
        self.sound_transform_nodes = SoundTransformNodes::Transform {
            left_to_left_gain,
            left_to_right_gain,
            right_to_left_gain,
            right_to_right_gain,
        };
        Ok(())
    }
}
|
|
|
|
|
|
|
|
/// The gain nodes controlling the sound transform for this sound.
/// Because most sounds will be untransformed, we lazily instantiate
/// this only when necessary to play a transformed sound.
enum SoundTransformNodes {
    /// No transform is applied to this sound.
    None,

    /// This sound has volume applied to it.
    Volume { gain: GainNode },

    /// This sound has a full transform applied to it.
    Transform {
        /// Gain from the source left channel into the output left channel.
        left_to_left_gain: GainNode,
        /// Gain from the source left channel into the output right channel.
        left_to_right_gain: GainNode,
        /// Gain from the source right channel into the output left channel.
        right_to_left_gain: GainNode,
        /// Gain from the source right channel into the output right channel.
        right_to_right_gain: GainNode,
    },
}
|
|
|
|
|
|
|
|
/// Boxed dynamic error type used throughout this backend.
type Error = Box<dyn std::error::Error>;
|
|
|
|
|
|
|
|
impl WebAudioBackend {
|
|
|
|
pub fn new() -> Result<Self, Error> {
|
|
|
|
let context = AudioContext::new().map_err(|_| "Unable to create AudioContext")?;
|
2019-12-04 06:55:58 +00:00
|
|
|
|
|
|
|
// Deduce the minimum sample rate for this browser.
|
|
|
|
let mut min_sample_rate = 44100;
|
|
|
|
while min_sample_rate > 5512
|
|
|
|
&& context
|
|
|
|
.create_buffer(1, 1, (min_sample_rate >> 1) as f32)
|
|
|
|
.is_ok()
|
|
|
|
{
|
|
|
|
min_sample_rate >>= 1;
|
|
|
|
}
|
|
|
|
log::info!("Minimum audio buffer sample rate: {}", min_sample_rate);
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
Ok(Self {
|
|
|
|
context,
|
|
|
|
sounds: Arena::new(),
|
2021-01-26 08:50:19 +00:00
|
|
|
preload_stream_data: FnvHashMap::default(),
|
|
|
|
next_stream_id: 0,
|
2019-08-26 23:38:37 +00:00
|
|
|
left_samples: vec![],
|
|
|
|
right_samples: vec![],
|
2021-10-16 08:25:31 +00:00
|
|
|
output_time: 0.0,
|
2019-10-30 00:02:11 +00:00
|
|
|
frame_rate: 1.0,
|
2019-12-04 06:55:58 +00:00
|
|
|
min_sample_rate,
|
2019-08-26 23:38:37 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-10-19 00:11:25 +00:00
|
|
|
    /// Returns the JavaScript AudioContext backing this audio backend.
    pub fn audio_context(&self) -> &AudioContext {
        &self.context
    }
|
|
|
|
|
2019-09-19 05:52:24 +00:00
|
|
|
    /// Starts playback of a registered sound, returning a handle to the new
    /// active instance.
    ///
    /// `settings` carries the SWF `SoundInfo` (loop count, in/out points,
    /// envelope) for event sounds. `is_stream` marks stream-sound playback;
    /// stream instances store their loop points relative to the stream start.
    ///
    /// Panics if `handle` does not refer to a registered sound.
    ///
    /// # Errors
    /// Returns an error if the sound uses an unimplemented codec or the
    /// decoder fails to initialize.
    fn start_sound_internal(
        &mut self,
        handle: SoundHandle,
        settings: Option<&swf::SoundInfo>,
        is_stream: bool,
    ) -> Result<SoundInstanceHandle, Error> {
        let sound = self.sounds.get(handle).unwrap();
        let handle = match &sound.source {
            SoundSource::AudioBuffer(audio_buffer) => {
                let audio_buffer = audio_buffer.borrow();
                let node = self.context.create_buffer_source().unwrap();
                node.set_buffer(Some(&*audio_buffer));

                // Keep a typed handle to the buffer source before `node` is
                // upcast to a plain AudioNode below.
                let buffer_source_node = node.clone();

                let sound_sample_rate: f64 = sound.format.sample_rate.into();
                let mut is_stereo = sound.format.is_stereo;
                let mut loop_start = f64::from(sound.skip_sample_frames) / 44100.0;
                let mut loop_end = std::f64::MAX;
                let mut num_loops = 1;
                let node: web_sys::AudioNode = match settings {
                    Some(settings)
                        if sound.skip_sample_frames > 0
                            || settings.num_loops > 1
                            || settings.in_sample.is_some()
                            || settings.out_sample.is_some()
                            || settings.envelope.is_some() =>
                    {
                        // Event sound with non-default parameters.
                        // Note that start/end values are in 44.1kHZ samples regardless of the sound's sample rate.
                        loop_start = f64::from(settings.in_sample.unwrap_or(0)) / 44100.0
                            + f64::from(sound.skip_sample_frames) / sound_sample_rate;
                        num_loops = settings.num_loops;
                        node.set_loop(num_loops > 1);
                        node.set_loop_start(loop_start);
                        node.start_with_when_and_grain_offset(0.0, loop_start)
                            .warn_on_error();

                        let current_time = self.context.current_time();

                        // The length of the sound in the swf, or by the script playing it, doesn't
                        // always line up with the actual length of the sound.
                        // Always set a custom end point to make sure we're correct.
                        loop_end = if let Some(out_sample) = settings.out_sample {
                            f64::from(out_sample) / 44100.0
                        } else {
                            f64::from(sound.num_sample_frames + u32::from(sound.skip_sample_frames))
                                / sound_sample_rate
                        };

                        // `AudioSourceBufferNode.loop` is a bool, so we have to stop the loop at the proper time.
                        // `start_with_when_and_grain_offset_and_grain_duration` unfortunately doesn't work
                        // as you might expect with loops, so we use `stop_with_when` to stop the loop.
                        let total_len = (loop_end - loop_start) * f64::from(settings.num_loops);
                        node.set_loop_end(loop_end);
                        node.stop_with_when(current_time + total_len)
                            .warn_on_error();

                        // For envelopes, we rig the node up to some splitter/gain nodes.
                        if let Some(envelope) = &settings.envelope {
                            // The envelope graph merges to a stereo output.
                            is_stereo = true;
                            self.create_sound_envelope(
                                node.into(),
                                envelope,
                                sound.format.is_stereo,
                                current_time,
                            )
                            .unwrap()
                        } else {
                            node.into()
                        }
                    }
                    _ => {
                        // Default event sound or stream.
                        node.start().warn_on_error();
                        node.into()
                    }
                };

                node.connect_with_audio_node(&self.context.destination())
                    .warn_on_error();

                // Create the sound instance and add it to the active instances list.
                let instance = SoundInstance {
                    handle: Some(handle),
                    format: sound.format.clone(),
                    start_time: self.context.current_time(),
                    // Stream sounds store loop points relative to the stream start.
                    loop_start: if is_stream { 0.0 } else { loop_start },
                    loop_end: if is_stream {
                        loop_end - loop_start
                    } else {
                        loop_end
                    },
                    num_loops,
                    instance_type: SoundInstanceType::AudioBuffer(AudioBufferInstance {
                        envelope_node: node.clone(),
                        envelope_is_stereo: is_stereo,
                        node,
                        buffer_source_node: buffer_source_node.clone(),
                        sound_transform_nodes: SoundTransformNodes::None,
                    }),
                };
                let instance_handle = SOUND_INSTANCES.with(|instances| {
                    let mut instances = instances.borrow_mut();
                    instances.insert(instance)
                });

                // Create the listener to remove the sound when it ends.
                let ended_handler = move || {
                    SOUND_INSTANCES.with(|instances| {
                        let mut instances = instances.borrow_mut();
                        instances.remove(instance_handle)
                    });
                };
                let closure = Closure::once_into_js(Box::new(ended_handler) as Box<dyn FnMut()>);
                // Note that we add the ended event to the AudioBufferSourceNode; an audio envelope adds more nodes
                // in the graph, but these nodes don't fire the ended event.
                let _ = buffer_source_node.set_onended(Some(closure.as_ref().unchecked_ref()));

                instance_handle
            }
            SoundSource::Decoder(audio_data) => {
                // Build the streaming decoder for this codec.
                let decoder: Decoder = match sound.format.compression {
                    AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
                        std::io::Cursor::new(audio_data.to_vec()),
                        sound.format.is_stereo,
                        sound.format.sample_rate,
                    )?),
                    AudioCompression::Nellymoser => Box::new(NellymoserDecoder::new(
                        std::io::Cursor::new(audio_data.to_vec()),
                        sound.format.sample_rate.into(),
                    )),
                    compression => {
                        return Err(format!("Unimplemented codec: {:?}", compression).into())
                    }
                };

                // Resample if the sound's rate doesn't match the context's output rate.
                let decoder: Decoder =
                    if sound.format.sample_rate != self.context.sample_rate() as u16 {
                        Box::new(resample(
                            decoder,
                            sound.format.sample_rate,
                            self.context.sample_rate() as u16,
                        ))
                    } else {
                        decoder
                    };

                let instance = SoundInstance {
                    handle: Some(handle),
                    format: sound.format.clone(),
                    start_time: self.context.current_time(),
                    loop_start: 0.0,
                    loop_end: std::f64::MAX,
                    num_loops: 1,
                    instance_type: SoundInstanceType::Decoder(decoder),
                };
                SOUND_INSTANCES.with(|instances| {
                    let mut instances = instances.borrow_mut();
                    let instance_handle = instances.insert(instance);
                    // A ScriptProcessorNode pulls decoded samples in 4096-frame chunks.
                    let script_processor_node = self.context.create_script_processor_with_buffer_size_and_number_of_input_channels_and_number_of_output_channels(4096, 0, if sound.format.is_stereo { 2 } else { 1 }).unwrap();
                    let script_node = script_processor_node.clone();
                    let closure = Closure::wrap(Box::new(move |event| {
                        SOUND_INSTANCES.with(|instances| {
                            let mut instances = instances.borrow_mut();
                            let instance = instances.get_mut(instance_handle).unwrap();
                            let complete = WebAudioBackend::update_script_processor(instance, event);
                            if complete {
                                // Sound exhausted: drop the instance and detach the node.
                                instances.remove(instance_handle);
                                script_node.disconnect().unwrap();
                            }
                        })
                    }) as Box<dyn FnMut(web_sys::AudioProcessingEvent)>);
                    script_processor_node.set_onaudioprocess(Some(closure.as_ref().unchecked_ref()));
                    // TODO: This will leak memory per playing sound. Remember and properly drop the closure.
                    closure.forget();

                    instance_handle
                })
            }
        };
        Ok(handle)
    }
|
|
|
|
|
2019-10-30 21:06:00 +00:00
|
|
|
    /// Wires up the envelope for Flash event sounds using `ChannelSplitter`, `Gain`, and `ChannelMerger` nodes.
    ///
    /// `node` is the sound's source node; `envelope` holds the SWF envelope
    /// points (sample positions are in 44.1kHz units); `start_time` is the
    /// `AudioContext.currentTime` at which playback begins. Returns the
    /// merger node, the new output end of the graph.
    ///
    /// # Errors
    /// Returns an error if any Web Audio node creation or connection fails.
    fn create_sound_envelope(
        &self,
        node: web_sys::AudioNode,
        envelope: &[swf::SoundEnvelopePoint],
        is_stereo: bool,
        start_time: f64,
    ) -> Result<web_sys::AudioNode, Box<dyn std::error::Error>> {
        // Split the left and right channels.
        let splitter = self
            .context
            .create_channel_splitter_with_number_of_outputs(2)
            .into_js_result()?;

        // Create the envelope gain nodes for the left and right channels.
        let left_gain = self.context.create_gain().into_js_result()?;
        let right_gain = self.context.create_gain().into_js_result()?;

        // Initial volume is clamped to first envelope point.
        if let Some(point) = envelope.get(0) {
            left_gain
                .gain()
                .set_value_at_time(point.left_volume, 0.0)
                .warn_on_error();
            right_gain
                .gain()
                .set_value_at_time(point.right_volume, 0.0)
                .warn_on_error();
        }

        // Add volume lerps for envelope points.
        for point in envelope {
            left_gain
                .gain()
                .linear_ramp_to_value_at_time(
                    point.left_volume,
                    start_time + f64::from(point.sample) / 44100.0,
                )
                .warn_on_error();
            right_gain
                .gain()
                .linear_ramp_to_value_at_time(
                    point.right_volume,
                    start_time + f64::from(point.sample) / 44100.0,
                )
                .warn_on_error();
        }

        // Merge the channels back together.
        let merger: web_sys::AudioNode = self
            .context
            .create_channel_merger_with_number_of_inputs(2)
            .into_js_result()?
            .into();

        // Wire up the nodes: source -> splitter -> gains -> merger.
        node.connect_with_audio_node(&splitter).into_js_result()?;
        splitter
            .connect_with_audio_node_and_output(&left_gain, 0)
            .into_js_result()?;
        // Note that for mono tracks, we want to use channel 0 (left) for both the left and right.
        splitter
            .connect_with_audio_node_and_output(&right_gain, if is_stereo { 1 } else { 0 })
            .into_js_result()?;
        left_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 0)
            .into_js_result()?;
        right_gain
            .connect_with_audio_node_and_output_and_input(&merger, 0, 1)
            .into_js_result()?;

        Ok(merger)
    }
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
fn decompress_to_audio_buffer(
|
|
|
|
&mut self,
|
|
|
|
format: &swf::SoundFormat,
|
|
|
|
audio_data: &[u8],
|
|
|
|
num_sample_frames: u32,
|
2019-10-28 09:23:27 +00:00
|
|
|
adpcm_block_offsets: Option<&[usize]>,
|
2020-06-17 18:42:06 +00:00
|
|
|
) -> Result<AudioBufferPtr, Error> {
|
2019-08-26 23:38:37 +00:00
|
|
|
if format.compression == AudioCompression::Mp3 {
|
2020-06-17 18:42:06 +00:00
|
|
|
return Ok(self.decompress_mp3_to_audio_buffer(format, audio_data, num_sample_frames));
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2020-01-29 20:29:41 +00:00
|
|
|
self.left_samples.clear();
|
|
|
|
self.right_samples.clear();
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
match format.compression {
|
2019-10-30 01:05:09 +00:00
|
|
|
AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
|
2020-01-16 05:12:45 +00:00
|
|
|
use byteorder::{LittleEndian, ReadBytesExt};
|
|
|
|
let mut audio_data = audio_data;
|
|
|
|
|
|
|
|
let read_sample = |audio_data: &mut &[u8]| {
|
|
|
|
if format.is_16_bit {
|
|
|
|
f32::from(audio_data.read_i16::<LittleEndian>().unwrap_or(0)) / 32767.0
|
|
|
|
} else {
|
|
|
|
f32::from(audio_data.read_u8().unwrap_or(0)) / 128.0 - 1.0
|
|
|
|
}
|
|
|
|
};
|
|
|
|
while !audio_data.is_empty() {
|
|
|
|
self.left_samples.push(read_sample(&mut audio_data));
|
|
|
|
if format.is_stereo {
|
|
|
|
self.right_samples.push(read_sample(&mut audio_data));
|
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
AudioCompression::Adpcm => {
|
2019-10-28 09:23:27 +00:00
|
|
|
// For stream sounds, the ADPCM header is included in each block,
|
|
|
|
// so we must recreate the decoder for each block.
|
|
|
|
// Event sounds don't have this issue.
|
|
|
|
let full = [0, audio_data.len()];
|
|
|
|
let adpcm_block_offsets = adpcm_block_offsets.unwrap_or(&full);
|
|
|
|
for block in adpcm_block_offsets.windows(2) {
|
|
|
|
let start = block[0];
|
|
|
|
let end = block[1];
|
2021-05-23 03:00:59 +00:00
|
|
|
let decoder = AdpcmDecoder::new(
|
2019-10-28 09:23:27 +00:00
|
|
|
&audio_data[start..end],
|
|
|
|
format.is_stereo,
|
|
|
|
format.sample_rate,
|
2021-10-14 06:12:32 +00:00
|
|
|
)?;
|
2019-10-28 09:23:27 +00:00
|
|
|
if format.is_stereo {
|
2021-05-23 03:00:59 +00:00
|
|
|
for frame in decoder {
|
2019-10-28 09:23:27 +00:00
|
|
|
let (l, r) = (frame[0], frame[1]);
|
|
|
|
self.left_samples.push(f32::from(l) / 32767.0);
|
|
|
|
self.right_samples.push(f32::from(r) / 32767.0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
self.left_samples
|
|
|
|
.extend(decoder.map(|n| f32::from(n[0]) / 32767.0));
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-12-05 20:42:57 +00:00
|
|
|
AudioCompression::Nellymoser => {
|
|
|
|
let decoder = NellymoserDecoder::new(audio_data, format.sample_rate.into());
|
|
|
|
for frame in decoder {
|
|
|
|
let (l, r) = (frame[0], frame[1]);
|
|
|
|
self.left_samples.push(f32::from(l) / 32767.0);
|
|
|
|
self.right_samples.push(f32::from(r) / 32767.0);
|
|
|
|
}
|
|
|
|
}
|
2020-06-17 18:42:06 +00:00
|
|
|
compression => return Err(format!("Unimplemented codec: {:?}", compression).into()),
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 06:55:58 +00:00
|
|
|
// This sucks. Firefox and Safari don't like low sample rates,
|
|
|
|
// so manually multiply the samples.
|
|
|
|
let sample_rate = if format.sample_rate < self.min_sample_rate {
|
|
|
|
let sample_multiplier = self.min_sample_rate / format.sample_rate;
|
2019-08-26 23:38:37 +00:00
|
|
|
let mut samples = Vec::with_capacity(self.left_samples.len() * 2);
|
|
|
|
for sample in &self.left_samples {
|
2019-12-04 06:55:58 +00:00
|
|
|
for _ in 0..sample_multiplier {
|
|
|
|
samples.push(*sample);
|
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
self.left_samples = samples;
|
|
|
|
|
|
|
|
if format.is_stereo {
|
|
|
|
let mut samples = Vec::with_capacity(self.right_samples.len() * 2);
|
|
|
|
for sample in &self.right_samples {
|
2019-12-04 06:55:58 +00:00
|
|
|
for _ in 0..sample_multiplier {
|
|
|
|
samples.push(*sample);
|
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
self.right_samples = samples;
|
|
|
|
}
|
|
|
|
|
2019-12-04 06:55:58 +00:00
|
|
|
self.min_sample_rate
|
2019-10-28 09:23:27 +00:00
|
|
|
} else {
|
2019-12-04 06:55:58 +00:00
|
|
|
format.sample_rate
|
2019-10-28 09:23:27 +00:00
|
|
|
};
|
|
|
|
|
2019-12-04 06:55:58 +00:00
|
|
|
let num_sample_frames = self.left_samples.len() as u32;
|
|
|
|
let audio_buffer = self
|
|
|
|
.context
|
|
|
|
.create_buffer(
|
|
|
|
if format.is_stereo { 2 } else { 1 },
|
|
|
|
num_sample_frames,
|
|
|
|
f32::from(sample_rate),
|
|
|
|
)
|
2021-09-11 22:48:30 +00:00
|
|
|
.map_err(|e| format!("Failed to create AudioBuffer: {:?}", e))?;
|
2019-12-04 06:55:58 +00:00
|
|
|
|
|
|
|
copy_to_audio_buffer(
|
|
|
|
&audio_buffer,
|
|
|
|
Some(&self.left_samples),
|
|
|
|
if format.is_stereo {
|
|
|
|
Some(&self.right_samples)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
},
|
|
|
|
);
|
2019-08-26 23:38:37 +00:00
|
|
|
|
2020-06-17 18:42:06 +00:00
|
|
|
Ok(Rc::new(RefCell::new(audio_buffer)))
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
    /// Decodes MP3 audio into an `AudioBuffer` using the browser's native
    /// `decodeAudioData` API.
    ///
    /// Decoding is asynchronous: a placeholder 1-frame buffer is returned
    /// immediately and swapped for the decoded buffer from the success
    /// callback. `NUM_SOUNDS_LOADING` is incremented before the decode is
    /// kicked off and decremented in both the success and error callbacks, so
    /// `is_loading_complete` reports `false` while any MP3 is still decoding.
    fn decompress_mp3_to_audio_buffer(
        &mut self,
        format: &swf::SoundFormat,
        audio_data: &[u8],
        _num_sample_frames: u32,
    ) -> AudioBufferPtr {
        // We use the Web decodeAudioData API to decode MP3 data.
        // TODO: Is it possible we finish loading before the MP3 is decoding?
        // Placeholder 1-frame buffer; replaced by the decoded data in the success callback.
        let audio_buffer = self
            .context
            .create_buffer(1, 1, self.context.sample_rate())
            .unwrap();
        let audio_buffer = Rc::new(RefCell::new(audio_buffer));

        // Clone the audio data into an ArrayBuffer
        // SAFETY: (compare with the docs for `Uint8Array::view`)
        // - We don't resize WASMs backing buffer before the view is cloned
        // - We don't mutate `data_array`
        // - Since we clone the buffer, its lifetime is correctly disconnected from `audio_data`
        let array_buffer = {
            let data_array = unsafe { js_sys::Uint8Array::view(audio_data) };
            data_array.buffer().slice_with_end(
                data_array.byte_offset(),
                data_array.byte_offset() + data_array.byte_length(),
            )
        };

        NUM_SOUNDS_LOADING.with(|n| n.set(n.get() + 1));

        let _num_channels = if format.is_stereo { 2 } else { 1 };
        // Success callback: move the decoded buffer into the shared slot.
        let buffer_ptr = Rc::clone(&audio_buffer);
        let success_closure = Closure::wrap(Box::new(move |buffer: web_sys::AudioBuffer| {
            *buffer_ptr.borrow_mut() = buffer;
            NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
        }) as Box<dyn FnMut(web_sys::AudioBuffer)>);
        // Error callback: leave the placeholder buffer in place, but still
        // decrement the loading counter so loading can complete.
        let error_closure = Closure::wrap(Box::new(move || {
            log::info!("Error decoding MP3 audio");
            NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
        }) as Box<dyn FnMut()>);
        // NOTE(review): the `let _` suggests a failure here was meant to be
        // ignored, but `.unwrap()` will still panic if the decode call itself
        // returns `Err` — confirm intent.
        let _ = self
            .context
            .decode_audio_data_with_success_callback_and_error_callback(
                &array_buffer,
                success_closure.as_ref().unchecked_ref(),
                error_closure.as_ref().unchecked_ref(),
            )
            .unwrap();

        // TODO: This will leak memory (once per decompressed MP3).
        // Not a huge deal as there are probably not many MP3s in an SWF.
        success_closure.forget();
        error_closure.forget();

        audio_buffer
    }
|
|
|
|
|
|
|
|
fn update_script_processor(
|
2019-09-27 19:25:22 +00:00
|
|
|
instance: &mut SoundInstance,
|
2019-08-26 23:38:37 +00:00
|
|
|
event: web_sys::AudioProcessingEvent,
|
|
|
|
) -> bool {
|
|
|
|
let mut complete = false;
|
|
|
|
let mut left_samples = vec![];
|
|
|
|
let mut right_samples = vec![];
|
2019-09-27 19:25:22 +00:00
|
|
|
if let SoundInstanceType::Decoder(ref mut decoder) = &mut instance.instance_type {
|
2019-08-26 23:38:37 +00:00
|
|
|
let output_buffer = event.output_buffer().unwrap();
|
|
|
|
let num_frames = output_buffer.length() as usize;
|
|
|
|
|
|
|
|
for _ in 0..num_frames {
|
2019-09-19 00:55:07 +00:00
|
|
|
if let Some(frame) = decoder.next() {
|
|
|
|
let (l, r) = (frame[0], frame[1]);
|
2019-08-26 23:38:37 +00:00
|
|
|
left_samples.push(f32::from(l) / 32767.0);
|
2019-09-27 19:25:22 +00:00
|
|
|
if instance.format.is_stereo {
|
2019-08-26 23:38:37 +00:00
|
|
|
right_samples.push(f32::from(r) / 32767.0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
complete = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-12-04 06:55:58 +00:00
|
|
|
copy_to_audio_buffer(
|
|
|
|
&output_buffer,
|
|
|
|
Some(&left_samples),
|
|
|
|
if instance.format.is_stereo {
|
|
|
|
Some(&right_samples)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
},
|
|
|
|
);
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
complete
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl AudioBackend for WebAudioBackend {
|
2019-11-08 20:07:01 +00:00
|
|
|
fn set_frame_rate(&mut self, frame_rate: f64) {
|
|
|
|
self.frame_rate = frame_rate
|
|
|
|
}
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
fn register_sound(&mut self, sound: &swf::Sound) -> Result<SoundHandle, Error> {
|
|
|
|
// Slice off latency seek for MP3 data.
|
2019-10-30 01:05:09 +00:00
|
|
|
let (skip_sample_frames, data) = if sound.format.compression == AudioCompression::Mp3 {
|
2021-06-22 10:04:27 +00:00
|
|
|
let skip_sample_frames = u16::from_le_bytes([sound.data[0], sound.data[1]]);
|
2019-10-30 01:05:09 +00:00
|
|
|
(skip_sample_frames, &sound.data[2..])
|
2019-08-26 23:38:37 +00:00
|
|
|
} else {
|
2021-02-12 13:03:17 +00:00
|
|
|
(0, sound.data)
|
2019-08-26 23:38:37 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
let sound = Sound {
|
|
|
|
format: sound.format.clone(),
|
|
|
|
source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(
|
|
|
|
&sound.format,
|
|
|
|
data,
|
|
|
|
sound.num_samples,
|
2019-10-28 09:23:27 +00:00
|
|
|
None,
|
2020-06-17 18:42:06 +00:00
|
|
|
)?),
|
2019-10-30 01:05:09 +00:00
|
|
|
num_sample_frames: sound.num_samples,
|
|
|
|
skip_sample_frames,
|
2021-01-26 08:50:19 +00:00
|
|
|
stream_segments: vec![],
|
2021-08-13 02:47:20 +00:00
|
|
|
size: data.len() as u32,
|
2019-08-26 23:38:37 +00:00
|
|
|
};
|
|
|
|
Ok(self.sounds.insert(sound))
|
|
|
|
}
|
|
|
|
|
|
|
|
fn preload_sound_stream_head(
|
|
|
|
&mut self,
|
|
|
|
stream_info: &swf::SoundStreamHead,
|
2021-01-26 08:50:19 +00:00
|
|
|
) -> Option<PreloadStreamHandle> {
|
|
|
|
let stream_id = self.next_stream_id;
|
|
|
|
self.next_stream_id = self.next_stream_id.wrapping_add(1);
|
|
|
|
self.preload_stream_data
|
|
|
|
.entry(stream_id)
|
2019-08-26 23:38:37 +00:00
|
|
|
.or_insert_with(|| StreamData {
|
|
|
|
format: stream_info.stream_format.clone(),
|
|
|
|
audio_data: vec![],
|
|
|
|
num_sample_frames: 0,
|
|
|
|
samples_per_block: stream_info.num_samples_per_block.into(),
|
2019-10-30 01:05:09 +00:00
|
|
|
skip_sample_frames: stream_info.latency_seek as u16,
|
2019-10-28 09:23:27 +00:00
|
|
|
adpcm_block_offsets: vec![],
|
2019-10-30 00:02:11 +00:00
|
|
|
stream_segments: vec![],
|
|
|
|
last_clip_frame: 0,
|
2022-01-04 01:45:14 +00:00
|
|
|
mp3_samples_buffered: 0,
|
2019-08-26 23:38:37 +00:00
|
|
|
});
|
2021-01-26 08:50:19 +00:00
|
|
|
Some(stream_id)
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2019-10-30 00:02:11 +00:00
|
|
|
    /// Accumulates one timeline frame's worth of streaming audio during preload.
    ///
    /// `clip_frame` is the timeline frame this `SoundStreamBlock` belongs to;
    /// it is used to detect gaps in the stream (which start a new stream
    /// segment) and to track how many MP3 samples are buffered across frames.
    fn preload_sound_stream_block(
        &mut self,
        stream_id: PreloadStreamHandle,
        clip_frame: u16,
        mut audio_data: &[u8],
    ) {
        if let Some(stream) = self.preload_stream_data.get_mut(&stream_id) {
            // Determine how many sample frames this block contributes.
            let num_samples = match stream.format.compression {
                AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
                    // Raw PCM: frame length = channels * bytes-per-sample.
                    let frame_len = if stream.format.is_stereo { 2 } else { 1 }
                        * if stream.format.is_16_bit { 2 } else { 1 };
                    (audio_data.len() as u32) / frame_len
                }
                AudioCompression::Mp3 => {
                    if audio_data.len() >= 4 {
                        // Read number of samples from SWF MP3 header.
                        let num_samples = u16::from_le_bytes([audio_data[0], audio_data[1]]);
                        // An MP3 audio stream can sometimes have a SoundStreamBlock with 0 samples,
                        // or even frames without a SoundStreamBlock tag, so keep a
                        // running tally of how many samples are waiting.
                        // When a new block is encountered, subtract the number of samples consumed
                        // by the prior timeline frames.
                        stream.mp3_samples_buffered += i32::from(num_samples);
                        stream.mp3_samples_buffered -=
                            i32::from(clip_frame - stream.last_clip_frame)
                                * stream.samples_per_block as i32;
                        // Skip the 4-byte SWF MP3 stream header (sample count + seek value).
                        audio_data = &audio_data[4..];
                        u32::from(num_samples)
                    } else {
                        // Too short to hold the header; contributes no samples.
                        0
                    }
                }
                AudioCompression::Adpcm => {
                    // For ADPCM data, we must keep track of where each block starts,
                    // so that we read the header in each block.
                    stream.adpcm_block_offsets.push(stream.audio_data.len());
                    stream.samples_per_block
                }
                _ => {
                    // TODO: This is a guess and may vary slightly from block to block?
                    stream.samples_per_block
                }
            };

            // Handle gaps in streaming audio by storing each continuous segment of the stream.
            let mut new_stream_segment = stream.last_clip_frame + 1 != clip_frame;
            if stream.format.compression == AudioCompression::Mp3 {
                // MP3: only treat a gap as a real segment break once buffered
                // samples are exhausted and this block actually carries data.
                new_stream_segment =
                    new_stream_segment && stream.mp3_samples_buffered <= 0 && num_samples > 0;
            }

            // Store the starting offsets for each stream segment.
            if stream.audio_data.is_empty() || new_stream_segment {
                // NOTE(review): this scales the starting sample by a 44.1 kHz
                // resample multiplier — assumes output is resampled to 44100 Hz;
                // confirm against the playback path.
                let sample_mult = 44100 / stream.format.sample_rate;
                let start_sample = stream.num_sample_frames * u32::from(sample_mult);
                stream.stream_segments.push((clip_frame, start_sample));
                stream.mp3_samples_buffered = num_samples as i32;
            }
            stream.num_sample_frames += num_samples;
            stream.audio_data.extend_from_slice(audio_data);
            stream.last_clip_frame = clip_frame;
        }
    }
|
|
|
|
|
2021-01-26 08:50:19 +00:00
|
|
|
fn preload_sound_stream_end(&mut self, stream_id: PreloadStreamHandle) -> Option<SoundHandle> {
|
|
|
|
let stream_data = self.preload_stream_data.remove(&stream_id);
|
2019-10-29 01:12:44 +00:00
|
|
|
|
|
|
|
if let Some(mut stream) = stream_data {
|
2019-10-30 00:02:11 +00:00
|
|
|
if !stream.audio_data.is_empty() {
|
2020-06-17 18:42:06 +00:00
|
|
|
if let Ok(audio_buffer) = self.decompress_to_audio_buffer(
|
2019-10-30 00:02:11 +00:00
|
|
|
&stream.format,
|
|
|
|
&stream.audio_data[..],
|
|
|
|
stream.num_sample_frames,
|
|
|
|
if stream.format.compression == AudioCompression::Adpcm {
|
|
|
|
stream.adpcm_block_offsets.push(stream.audio_data.len());
|
|
|
|
Some(&stream.adpcm_block_offsets[..])
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
},
|
2020-06-17 18:42:06 +00:00
|
|
|
) {
|
|
|
|
let handle = self.sounds.insert(Sound {
|
|
|
|
format: stream.format,
|
|
|
|
source: SoundSource::AudioBuffer(audio_buffer),
|
|
|
|
num_sample_frames: stream.num_sample_frames,
|
|
|
|
skip_sample_frames: stream.skip_sample_frames,
|
2021-01-26 08:50:19 +00:00
|
|
|
stream_segments: stream.stream_segments,
|
2021-08-13 02:47:20 +00:00
|
|
|
size: stream.audio_data.len() as u32,
|
2020-06-17 18:42:06 +00:00
|
|
|
});
|
2021-01-26 08:50:19 +00:00
|
|
|
return Some(handle);
|
2020-06-17 18:42:06 +00:00
|
|
|
}
|
2019-10-30 00:02:11 +00:00
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
2021-01-26 08:50:19 +00:00
|
|
|
|
|
|
|
None
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2020-01-02 03:33:21 +00:00
|
|
|
fn start_sound(
|
|
|
|
&mut self,
|
|
|
|
sound: SoundHandle,
|
|
|
|
sound_info: &swf::SoundInfo,
|
2020-06-17 18:20:24 +00:00
|
|
|
) -> Result<SoundInstanceHandle, Error> {
|
2022-01-04 01:45:14 +00:00
|
|
|
let handle = self.start_sound_internal(sound, Some(sound_info), false)?;
|
2020-06-17 18:42:06 +00:00
|
|
|
Ok(handle)
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
    /// Starts playback of a streaming sound beginning at timeline frame
    /// `clip_frame`.
    ///
    /// A preloaded stream may contain gaps; `stream_segments` records where
    /// each continuous run of audio starts, so this maps `clip_frame` to the
    /// proper starting (and ending) sample before delegating to
    /// `start_sound_internal`.
    fn start_stream(
        &mut self,
        stream_handle: Option<SoundHandle>,
        clip_frame: u16,
        _clip_data: ruffle_core::tag_utils::SwfSlice,
        _stream_info: &swf::SoundStreamHead,
    ) -> Result<SoundInstanceHandle, Error> {
        if let Some(stream) = stream_handle {
            let mut sound_info = None;
            if let Some(sound) = self.sounds.get(stream) {
                // Figure out the frame and sample where this stream segment first starts.
                let (start_pos, end_pos) = match sound
                    .stream_segments
                    .binary_search_by(|(f, _)| f.cmp(&clip_frame))
                {
                    // Exact hit: play from this segment's start to the next segment (if any).
                    Ok(i) => (
                        sound.stream_segments[i].1,
                        sound.stream_segments.get(i + 1).map(|s| s.1),
                    ),
                    // `clip_frame` falls within (or beyond) segment `i - 1`:
                    // estimate the starting sample from the frames skipped.
                    Err(i) => {
                        let (segment_frame, segment_sample) = sound
                            .stream_segments
                            .get(i.saturating_sub(1))
                            .copied()
                            .unwrap_or_default();
                        let frames_skipped = u32::from(clip_frame.saturating_sub(segment_frame));
                        // NOTE(review): assumes 44.1 kHz output samples — confirm
                        // against the resampling applied during preload.
                        let samples_per_frame = 44100.0 / self.frame_rate;
                        let start_pos =
                            segment_sample + (f64::from(frames_skipped) * samples_per_frame) as u32;
                        let end_pos = if let Some(next_segment) = sound.stream_segments.get(i) {
                            Some(next_segment.1)
                        } else if start_pos
                            < sound.num_sample_frames
                                * (44100 / u32::from(sound.format.sample_rate))
                        {
                            // Within the final segment: play to the end of the sound.
                            None
                        } else {
                            return Err("Stream sound out of range".into());
                        };
                        (start_pos, end_pos)
                    }
                };
                sound_info = Some(swf::SoundInfo {
                    event: swf::SoundEvent::Event,
                    in_sample: Some(start_pos),
                    out_sample: end_pos,
                    num_loops: 1,
                    envelope: None,
                });
            }
            let instance = self.start_sound_internal(stream, sound_info.as_ref(), true)?;
            Ok(instance)
        } else {
            let msg = format!("Missing stream for sound ID {:?}", stream_handle);
            log::error!("{}", msg);
            Err(msg.into())
        }
    }
|
|
|
|
|
2020-01-02 03:33:21 +00:00
|
|
|
fn stop_sound(&mut self, sound: SoundInstanceHandle) {
|
|
|
|
SOUND_INSTANCES.with(|instances| {
|
|
|
|
let mut instances = instances.borrow_mut();
|
2020-10-26 02:34:52 +00:00
|
|
|
instances.remove(sound);
|
2020-01-02 03:33:21 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
fn is_loading_complete(&self) -> bool {
|
|
|
|
NUM_SOUNDS_LOADING.with(|n| n.get() == 0)
|
|
|
|
}
|
|
|
|
|
2020-09-23 01:18:22 +00:00
|
|
|
fn play(&mut self) {
|
2019-08-26 23:38:37 +00:00
|
|
|
// Allow audio to start playing after a user gesture.
|
|
|
|
let _ = self.context.resume();
|
|
|
|
}
|
2019-09-19 07:21:22 +00:00
|
|
|
|
2020-09-23 01:18:22 +00:00
|
|
|
fn pause(&mut self) {
|
2020-09-18 22:52:35 +00:00
|
|
|
// Suspend audio to be resumed later.
|
|
|
|
let _ = self.context.suspend();
|
|
|
|
}
|
|
|
|
|
2019-09-19 07:21:22 +00:00
|
|
|
fn stop_all_sounds(&mut self) {
|
2019-09-27 19:25:22 +00:00
|
|
|
SOUND_INSTANCES.with(|instances| {
|
|
|
|
let mut instances = instances.borrow_mut();
|
2020-10-26 02:12:28 +00:00
|
|
|
// This is a workaround for a bug in generational-arena:
|
|
|
|
// Arena::clear does not properly bump the generational index, allowing for stale references
|
|
|
|
// to continue to work (this caused #1315). Arena::remove will force a generation bump.
|
|
|
|
// See https://github.com/fitzgen/generational-arena/issues/30
|
|
|
|
if let Some((i, _)) = instances.iter().next() {
|
|
|
|
instances.remove(i);
|
|
|
|
}
|
2019-09-27 19:25:22 +00:00
|
|
|
instances.clear();
|
2019-09-19 07:21:22 +00:00
|
|
|
})
|
|
|
|
}
|
2019-09-27 17:23:53 +00:00
|
|
|
|
2021-10-16 05:45:01 +00:00
|
|
|
fn get_sound_position(&self, instance: SoundInstanceHandle) -> Option<f64> {
|
2021-01-23 02:03:59 +00:00
|
|
|
SOUND_INSTANCES.with(|instances| {
|
|
|
|
let instances = instances.borrow();
|
2021-10-16 08:25:31 +00:00
|
|
|
instances.get(instance).map(|instance| {
|
|
|
|
// Estimate the position of the sound based on the current AudioContext time.
|
|
|
|
let mut dt = self.output_time - instance.start_time;
|
|
|
|
dt = dt.max(0.0);
|
|
|
|
let loop_time = instance.loop_end - instance.loop_start;
|
|
|
|
let loop_index = (dt / loop_time) as u16;
|
|
|
|
// If the sound is looping, the position cycles between the start and end times,
|
|
|
|
// except on the final loop, where we clamp to the final position.
|
|
|
|
if loop_index < instance.num_loops {
|
|
|
|
dt = dt.rem_euclid(loop_time);
|
|
|
|
}
|
|
|
|
dt += instance.loop_start;
|
|
|
|
dt = dt.min(instance.loop_end);
|
|
|
|
dt * 1000.0
|
|
|
|
})
|
2021-01-23 02:03:59 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-08-13 22:50:15 +00:00
|
|
|
fn get_sound_duration(&self, sound: SoundHandle) -> Option<f64> {
|
2020-01-03 02:23:58 +00:00
|
|
|
if let Some(sound) = self.sounds.get(sound) {
|
2021-04-16 15:43:17 +00:00
|
|
|
// AS duration does not subtract `skip_sample_frames`.
|
|
|
|
let num_sample_frames: f64 = sound.num_sample_frames.into();
|
|
|
|
let sample_rate: f64 = sound.format.sample_rate.into();
|
2021-08-13 22:50:15 +00:00
|
|
|
let ms = num_sample_frames * 1000.0 / sample_rate;
|
|
|
|
Some(ms)
|
2020-01-03 02:23:58 +00:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
2021-01-24 08:16:07 +00:00
|
|
|
|
2021-08-13 02:47:20 +00:00
|
|
|
fn get_sound_size(&self, sound: SoundHandle) -> Option<u32> {
|
|
|
|
self.sounds.get(sound).map(|s| s.size)
|
|
|
|
}
|
|
|
|
|
2021-08-17 22:19:59 +00:00
|
|
|
fn get_sound_format(&self, sound: SoundHandle) -> Option<&swf::SoundFormat> {
|
|
|
|
self.sounds.get(sound).map(|s| &s.format)
|
|
|
|
}
|
|
|
|
|
2021-01-24 08:16:07 +00:00
|
|
|
fn set_sound_transform(&mut self, instance: SoundInstanceHandle, transform: SoundTransform) {
|
|
|
|
SOUND_INSTANCES.with(|instances| {
|
|
|
|
let mut instances = instances.borrow_mut();
|
|
|
|
if let Some(instance) = instances.get_mut(instance) {
|
|
|
|
if let SoundInstanceType::AudioBuffer(sound) = &mut instance.instance_type {
|
|
|
|
sound.set_transform(&self.context, &transform);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
2021-10-16 08:25:31 +00:00
|
|
|
|
|
|
|
fn tick(&mut self) {
|
|
|
|
// Update the output timestamp.
|
|
|
|
// We do this once per frame to avoid spamming it in `get_sound_position`.
|
|
|
|
self.output_time = get_audio_output_timestamp(&self.context);
|
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2020-11-13 20:40:15 +00:00
|
|
|
#[wasm_bindgen(raw_module = "./ruffle-imports.js")]
extern "C" {
    /// Imported JS method to copy data into an `AudioBuffer`.
    /// We'd prefer to use `AudioBuffer.copyToChannel`, but this isn't supported
    /// on Safari.
    ///
    /// NOTE(review): a channel passed as `None` is presumably left untouched —
    /// verify against the implementation in `ruffle-imports.js`.
    #[wasm_bindgen(js_name = "copyToAudioBuffer")]
    fn copy_to_audio_buffer(
        audio_buffer: &web_sys::AudioBuffer,
        left_data: Option<&[f32]>,
        right_data: Option<&[f32]>,
    );

    /// Imported JS method to call `AudioContext.getOutputTimestamp` because
    /// it is not yet available in `web_sys`.
    #[wasm_bindgen(js_name = "getAudioOutputTimestamp")]
    fn get_audio_output_timestamp(context: &web_sys::AudioContext) -> f64;
}
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
// Janky resampling code.
|
|
|
|
// TODO: Clean this up.
|
2020-05-30 10:47:12 +00:00
|
|
|
/// Resamples a stream of stereo `[left, right]` sample frames from
/// `input_sample_rate` to `output_sample_rate` using linear interpolation.
///
/// The output ends once the input can no longer supply a pair of frames to
/// interpolate between, so the final input frame is not emitted on its own.
/// An empty input yields an empty output (previously this panicked on an
/// `unwrap` of the first frame).
#[allow(unused_assignments)]
fn resample(
    mut input: impl Iterator<Item = [i16; 2]>,
    input_sample_rate: u16,
    output_sample_rate: u16,
) -> impl Iterator<Item = [i16; 2]> {
    // Prime the interpolation window with the first two input frames.
    let (mut left0, mut right0) = match input.next() {
        Some(frame) => (Some(frame[0]), Some(frame[1])),
        None => (None, None),
    };
    let (mut left1, mut right1) = match input.next() {
        Some(frame) => (Some(frame[0]), Some(frame[1])),
        None => (None, None),
    };
    // Default to silence instead of unwrapping: on an empty input the
    // iterator below yields nothing, so these values are never observed.
    let (mut left, mut right) = (left0.unwrap_or(0), right0.unwrap_or(0));
    let dt_input = 1.0 / f64::from(input_sample_rate);
    let dt_output = 1.0 / f64::from(output_sample_rate);
    // `t` is the output cursor's offset within the current input interval.
    let mut t = 0.0;
    std::iter::from_fn(move || {
        if let (Some(l0), Some(r0), Some(l1), Some(r1)) = (left0, right0, left1, right1) {
            // Linearly interpolate between frame 0 and frame 1 at fraction `a`.
            let a = t / dt_input;
            let l0: f64 = l0.into();
            let l1: f64 = l1.into();
            let r0: f64 = r0.into();
            let r1: f64 = r1.into();
            left = (l0 + (l1 - l0) * a) as i16;
            right = (r0 + (r1 - r0) * a) as i16;
            t += dt_output;
            // Slide the two-frame window forward as the output cursor
            // crosses input frame boundaries.
            while t >= dt_input {
                t -= dt_input;
                left0 = left1;
                right0 = right1;
                if let Some(frame) = input.next() {
                    left1 = Some(frame[0]);
                    right1 = Some(frame[1]);
                } else {
                    left1 = None;
                    right1 = None;
                }
            }
            Some([left, right])
        } else {
            None
        }
    })
}
|