2020-07-10 22:24:05 +00:00
|
|
|
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
|
2019-08-26 23:38:37 +00:00
|
|
|
use generational_arena::Arena;
|
2019-09-19 00:55:07 +00:00
|
|
|
use ruffle_core::backend::audio::decoders::{
|
2019-10-28 08:48:14 +00:00
|
|
|
self, AdpcmDecoder, Mp3Decoder, PcmDecoder, SeekableDecoder,
|
2019-09-19 00:55:07 +00:00
|
|
|
};
|
2020-01-02 03:33:21 +00:00
|
|
|
use ruffle_core::backend::audio::{
|
|
|
|
swf, AudioBackend, AudioStreamHandle, SoundHandle, SoundInstanceHandle,
|
|
|
|
};
|
2019-10-28 08:48:14 +00:00
|
|
|
use ruffle_core::tag_utils::SwfSlice;
|
|
|
|
use std::io::Cursor;
|
2019-09-19 00:55:07 +00:00
|
|
|
use std::sync::{Arc, Mutex};
|
2019-10-30 01:27:29 +00:00
|
|
|
use swf::AudioCompression;
|
2019-09-19 00:55:07 +00:00
|
|
|
|
|
|
|
/// Audio backend for Ruffle that plays sound through a `cpal` output stream.
///
/// Mixing happens on the cpal audio callback thread (see `mix_audio`); the
/// game thread registers sounds and starts/stops instances.
#[allow(dead_code)]
pub struct CpalAudioBackend {
    /// The cpal output device the stream was built on.
    device: cpal::Device,
    /// The stream configuration (channel count, sample rate) chosen in `init`.
    output_config: cpal::StreamConfig,
    /// The running output stream; kept alive for the lifetime of the backend.
    stream: Stream,
    /// All registered sound definitions (from `DefineSound` tags).
    sounds: Arena<Sound>,
    /// Actively playing sounds, shared with the audio callback thread,
    /// which locks this on every buffer refill.
    sound_instances: Arc<Mutex<Arena<SoundInstance>>>,
}
|
|
|
|
|
2020-07-10 22:24:05 +00:00
|
|
|
// Because of https://github.com/RustAudio/cpal/pull/348, we have to initialize cpal on a
// separate thread (see `new` below). Unfortunately `cpal::Stream` is marked `!Send`, but
// we know this should be safe (since we aren't accessing the stream at all after creation;
// we just want to keep it alive)
struct Stream(cpal::Stream);

// SAFETY: `CpalAudioBackend` is only `!Send` because of the `cpal::Stream` it
// holds, and that stream is never touched after creation — it is only kept
// alive and eventually dropped. See the comment above.
// NOTE(review): this relies on the cpal backend tolerating a drop from a
// different thread than the one that created the stream — confirm per platform.
unsafe impl Send for CpalAudioBackend {}
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
/// A boxed, thread-safe audio signal yielding stereo 16-bit sample frames.
type Signal = Box<dyn Send + sample::signal::Signal<Frame = [i16; 2]>>;

/// Boxed dynamic error type used by this backend's fallible operations.
type Error = Box<dyn std::error::Error>;
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
/// Contains the data and metadata for a sound in an SWF file.
/// A `Sound` is defined by the `DefineSound` SWF tags.
struct Sound {
    /// Compression, sample rate, channel count, and bit depth of the data.
    format: swf::SoundFormat,
    /// The raw, still-encoded audio data (MP3 latency-seek header already
    /// stripped in `register_sound`). Shared so instances can decode
    /// independently without copying.
    data: Arc<Vec<u8>>,
    /// Number of samples in this audio.
    /// This does not include the skip_sample_frames.
    num_sample_frames: u32,

    /// Number of samples to skip encoder delay.
    skip_sample_frames: u16,
}
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
/// An actively playing instance of a sound.
/// This sound can be either an event sound (`StartSound`) or
/// a stream sound (`SoundStreamBlock`).
/// The audio thread will iterate through all `SoundInstance`s
/// to fill the audio buffer.
#[allow(dead_code)]
struct SoundInstance {
    /// The handle the sound definition inside `sounds`.
    /// `None` if this is a stream sound.
    handle: Option<SoundHandle>,

    /// The audio stream. Call `next()` to yield sample frames.
    signal: Signal,

    /// The character ID of the movie clip that contains this stream.
    /// `None` if this sound is an event sound (`StartSound`).
    clip_id: Option<swf::CharacterId>,

    /// Flag indicating whether this sound is still playing.
    /// If this flag is false, the sound will be cleaned up during the
    /// next loop of the sound thread.
    active: bool,
}
|
|
|
|
|
|
|
|
impl CpalAudioBackend {
|
2020-06-17 18:20:24 +00:00
|
|
|
/// Creates the cpal audio backend, spawning a helper thread so that cpal
/// initialization never runs on the main (winit) thread.
///
/// Returns an error if no audio device/format is available or if the
/// initialization thread panics.
pub fn new() -> Result<Self, Error> {
    // Initialize cpal on a separate thread to avoid issues on Windows with cpal + winit:
    // https://github.com/RustAudio/cpal/pull/348
    // TODO: Revert back to doing this on the same thread when the above is fixed.
    // The error is converted to a `String` because `Error` is not `Send`.
    let init_thread = std::thread::spawn(move || -> Result<Self, String> {
        Self::init().map_err(|e| e.to_string())
    });

    match init_thread.join() {
        Ok(Ok(audio)) => Ok(audio),
        Ok(Err(e)) => Err(e.into()),
        // The thread panicked; report it as a plain error instead of
        // propagating the panic.
        Err(_) => Err("Panic when initializing audio".into()),
    }
}
|
|
|
|
|
2020-06-17 18:20:24 +00:00
|
|
|
/// Opens the default output device, builds an output stream matching the
/// device's native sample format, and starts playback.
///
/// The stream's data callback locks `sound_instances` and mixes all active
/// sounds into the output buffer (see `mix_audio`).
fn init() -> Result<Self, Error> {
    // Create CPAL audio device.
    let host = cpal::default_host();
    let device = host
        .default_output_device()
        .ok_or("No audio devices available")?;

    // Create audio stream for device.
    // Only the first supported config is used, at its maximum sample rate.
    let mut supported_configs = device
        .supported_output_configs()
        .map_err(|_| "No supported audio format")?;
    let config = supported_configs
        .next()
        .ok_or("No supported audio formats")?
        .with_max_sample_rate();
    // Grab the sample format before erasing the typed config into a plain
    // `StreamConfig`.
    let sample_format = config.sample_format();
    let config = cpal::StreamConfig::from(config);

    // Active-sound list shared between the game thread and the audio
    // callback thread.
    let sound_instances: Arc<Mutex<Arena<SoundInstance>>> = Arc::new(Mutex::new(Arena::new()));

    // Start the audio stream.
    let stream = {
        let sound_instances = Arc::clone(&sound_instances);
        let error_handler = move |err| log::error!("Audio stream error: {}", err);
        let output_config = config.clone();

        use cpal::SampleFormat;
        // Build the data callback for whichever raw sample type the device
        // wants; `mix_audio` is generic over the output sample type.
        match sample_format {
            SampleFormat::F32 => device.build_output_stream(
                &config,
                move |buffer, _| {
                    let mut sound_instances = sound_instances.lock().unwrap();
                    Self::mix_audio::<f32>(&mut sound_instances, &output_config, buffer)
                },
                error_handler,
            ),
            SampleFormat::I16 => device.build_output_stream(
                &config,
                move |buffer, _| {
                    let mut sound_instances = sound_instances.lock().unwrap();
                    Self::mix_audio::<i16>(&mut sound_instances, &output_config, buffer)
                },
                error_handler,
            ),
            SampleFormat::U16 => device.build_output_stream(
                &config,
                move |buffer, _| {
                    let mut sound_instances = sound_instances.lock().unwrap();
                    Self::mix_audio::<u16>(&mut sound_instances, &output_config, buffer)
                },
                error_handler,
            ),
        }?
    };

    stream.play()?;

    Ok(Self {
        device,
        output_config: config,
        // Wrapped so the `!Send` cpal stream can live inside a `Send` backend.
        stream: Stream(stream),
        sounds: Arena::new(),
        sound_instances,
    })
}
|
2019-09-19 00:55:07 +00:00
|
|
|
|
2019-09-19 05:52:24 +00:00
|
|
|
/// Instantiate a seekable decoder for the compression that the sound data uses.
///
/// Seekable decoders are required for event sounds, whose playback settings
/// (loops, in/out points) need random access into the sample stream.
/// Returns an error for compression formats without a seekable decoder
/// (e.g. Nellymoser, Speex).
fn make_seekable_decoder(
    format: &swf::SoundFormat,
    data: Cursor<VecAsRef>,
) -> Result<Box<dyn Send + SeekableDecoder>, Error> {
    let decoder: Box<dyn Send + SeekableDecoder> = match format.compression {
        AudioCompression::Uncompressed => Box::new(PcmDecoder::new(
            data,
            format.is_stereo,
            format.sample_rate,
            format.is_16_bit,
        )),
        AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
            data,
            format.is_stereo,
            format.sample_rate,
        )),
        AudioCompression::Mp3 => Box::new(Mp3Decoder::new(
            // Mp3Decoder takes an explicit channel count rather than a flag.
            if format.is_stereo { 2 } else { 1 },
            format.sample_rate.into(),
            data,
        )),
        _ => {
            let msg = format!(
                "start_stream: Unhandled audio compression {:?}",
                format.compression
            );
            log::error!("{}", msg);
            return Err(msg.into());
        }
    };
    Ok(decoder)
}
|
|
|
|
|
|
|
|
/// Resamples a stream.
/// TODO: Allow interpolator to be user-configurable?
///
/// Converts `signal` from the sound's native sample rate (`format.sample_rate`)
/// to the output device's rate using linear interpolation.
fn make_resampler<S: Send + sample::signal::Signal<Frame = [i16; 2]>>(
    &self,
    format: &swf::SoundFormat,
    mut signal: S,
) -> sample::interpolate::Converter<S, impl sample::interpolate::Interpolator<Frame = [i16; 2]>>
{
    // The interpolator primes itself by pulling initial frames from the
    // source signal, hence the temporary mutable borrow.
    let interpolator = sample::interpolate::Linear::from_source(&mut signal);
    sample::interpolate::Converter::from_hz_to_hz(
        signal,
        interpolator,
        format.sample_rate.into(),
        self.output_config.sample_rate.0.into(),
    )
}
|
|
|
|
|
|
|
|
/// Creates a `sample::signal::Signal` that decodes and resamples the audio stream
/// to the output format.
///
/// Used for event sounds that have playback settings (loops, in/out points,
/// envelope); simple event sounds take the cheaper
/// `make_signal_from_simple_event_sound` path instead.
fn make_signal_from_event_sound(
    &self,
    sound: &Sound,
    settings: &swf::SoundInfo,
    data: Cursor<VecAsRef>,
) -> Result<Box<dyn Send + sample::signal::Signal<Frame = [i16; 2]>>, Error> {
    // Instantiate a decoder for the compression that the sound data uses.
    let decoder = Self::make_seekable_decoder(&sound.format, data)?;

    // Wrap the decoder in the event sound signal (controls looping/envelope)
    let signal = EventSoundSignal::new_with_settings(
        decoder,
        settings,
        sound.num_sample_frames,
        sound.skip_sample_frames,
    );
    // Convert the `Decoder` to a `Signal`, and resample it to the output
    // sample rate.
    let signal = self.make_resampler(&sound.format, signal);
    Ok(Box::new(signal))
}
|
|
|
|
|
2019-10-28 08:48:14 +00:00
|
|
|
/// Creates a `sample::signal::Signal` that decodes and resamples a "stream" sound.
|
|
|
|
fn make_signal_from_stream<'a>(
|
|
|
|
&self,
|
|
|
|
format: &swf::SoundFormat,
|
|
|
|
data_stream: SwfSlice,
|
2020-06-17 18:20:24 +00:00
|
|
|
) -> Result<Box<dyn 'a + Send + sample::signal::Signal<Frame = [i16; 2]>>, Error> {
|
2019-10-28 08:48:14 +00:00
|
|
|
// Instantiate a decoder for the compression that the sound data uses.
|
2020-06-17 18:20:24 +00:00
|
|
|
let clip_stream_decoder = decoders::make_stream_decoder(format, data_stream)?;
|
2019-10-28 08:48:14 +00:00
|
|
|
|
|
|
|
// Convert the `Decoder` to a `Signal`, and resample it the the output
|
|
|
|
// sample rate.
|
|
|
|
let signal = sample::signal::from_iter(clip_stream_decoder);
|
|
|
|
let signal = Box::new(self.make_resampler(format, signal));
|
2020-06-17 18:20:24 +00:00
|
|
|
Ok(Box::new(signal))
|
2019-10-28 08:48:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 05:52:24 +00:00
|
|
|
/// Creates a `sample::signal::Signal` that decodes and resamples the audio stream
/// to the output format.
///
/// This is the fast path for event sounds with no loops/in-out points/envelope;
/// it uses a plain (non-seekable) decoder, like stream sounds do.
fn make_signal_from_simple_event_sound<'a, R: 'a + std::io::Read + Send>(
    &self,
    format: &swf::SoundFormat,
    data_stream: R,
) -> Result<Box<dyn 'a + Send + sample::signal::Signal<Frame = [i16; 2]>>, Error> {
    // Instantiate a decoder for the compression that the sound data uses.
    let decoder = decoders::make_decoder(format, data_stream)?;

    // Convert the `Decoder` to a `Signal`, and resample it to the output
    // sample rate.
    let signal = sample::signal::from_iter(decoder);
    let signal = self.make_resampler(format, signal);
    Ok(Box::new(signal))
}
|
|
|
|
|
|
|
|
/// Callback to the audio thread.
/// Refill the output buffer by stepping through all active sounds
/// and mixing in their output.
///
/// `T` is the device's raw sample type (f32/i16/u16); frames are mixed in
/// `T`'s signed representation and converted back when written out.
fn mix_audio<'a, T>(
    sound_instances: &mut Arena<SoundInstance>,
    output_format: &cpal::StreamConfig,
    mut output_buffer: &mut [T],
) where
    T: 'a + cpal::Sample + Default + sample::Sample,
    T::Signed: sample::conv::FromSample<i16>,
{
    use sample::{
        frame::{Frame, Stereo},
        Sample,
    };
    use std::ops::DerefMut;

    // For each sample, mix the samples from all active sound instances.
    // The buffer is walked one interleaved frame (`channels` samples) at a time.
    for buf_frame in output_buffer
        .deref_mut()
        .chunks_exact_mut(output_format.channels.into())
    {
        // Start from silence and accumulate every active sound's frame.
        let mut output_frame = Stereo::<T::Signed>::equilibrium();
        for (_, sound) in sound_instances.iter_mut() {
            if sound.active && !sound.signal.is_exhausted() {
                let sound_frame = sound.signal.next();
                let sound_frame: Stereo<T::Signed> = sound_frame.map(Sample::to_sample);
                output_frame = output_frame.add_amp(sound_frame);
            } else {
                // Exhausted (or already inactive) sounds are flagged for
                // removal below.
                sound.active = false;
            }
        }

        // Write the mixed stereo frame into the device buffer. If the device
        // has more than 2 channels, the extra channels are left untouched;
        // with 1 channel, only the left sample is written.
        for (buf_sample, output_sample) in buf_frame.iter_mut().zip(output_frame.iter()) {
            *buf_sample = output_sample.to_sample();
        }
    }

    // Remove all dead sounds.
    sound_instances.retain(|_, sound| sound.active);
}
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
impl AudioBackend for CpalAudioBackend {
|
2020-06-17 18:20:24 +00:00
|
|
|
fn register_sound(&mut self, swf_sound: &swf::Sound) -> Result<SoundHandle, Error> {
|
2019-10-30 01:27:29 +00:00
|
|
|
// Slice off latency seek for MP3 data.
|
|
|
|
let (skip_sample_frames, data) = if swf_sound.format.compression == AudioCompression::Mp3 {
|
|
|
|
let skip_sample_frames =
|
|
|
|
u16::from(swf_sound.data[0]) | (u16::from(swf_sound.data[1]) << 8);
|
|
|
|
(skip_sample_frames, &swf_sound.data[2..])
|
|
|
|
} else {
|
|
|
|
(0, &swf_sound.data[..])
|
|
|
|
};
|
|
|
|
|
2019-08-26 23:38:37 +00:00
|
|
|
let sound = Sound {
|
|
|
|
format: swf_sound.format.clone(),
|
2019-10-30 01:27:29 +00:00
|
|
|
data: Arc::new(data.to_vec()),
|
|
|
|
num_sample_frames: swf_sound.num_samples,
|
|
|
|
skip_sample_frames,
|
2019-08-26 23:38:37 +00:00
|
|
|
};
|
|
|
|
Ok(self.sounds.insert(sound))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Starts playback of a movie clip's stream sound (`SoundStreamHead` +
/// `SoundStreamBlock` tags) and returns a handle for stopping it.
fn start_stream(
    &mut self,
    clip_id: swf::CharacterId,
    _clip_frame: u16,
    clip_data: SwfSlice,
    stream_info: &swf::SoundStreamHead,
) -> Result<AudioStreamHandle, Error> {
    let format = &stream_info.stream_format;

    // The audio data for stream sounds is distributed among the frames of a
    // movie clip. The stream tag reader will parse through the SWF and
    // feed the decoder audio data on the fly.
    let signal = self.make_signal_from_stream(format, clip_data)?;

    // Register the stream as an active instance; the audio thread will start
    // pulling frames from it on the next buffer refill.
    let mut sound_instances = self.sound_instances.lock().unwrap();
    let handle = sound_instances.insert(SoundInstance {
        // Stream sounds have no `Sound` definition, only a clip id.
        handle: None,
        clip_id: Some(clip_id),
        signal,
        active: true,
    });
    Ok(handle)
}
|
|
|
|
|
2019-10-29 01:12:44 +00:00
|
|
|
/// Stops a playing stream sound, removing its instance from the active set.
fn stop_stream(&mut self, stream: AudioStreamHandle) {
    self.sound_instances.lock().unwrap().remove(stream);
}
|
|
|
|
|
2020-01-02 03:33:21 +00:00
|
|
|
/// Starts playback of a registered event sound (`StartSound`) with the given
/// playback settings, returning a handle to the new instance.
///
/// Panics if `sound_handle` is not a handle previously returned by
/// `register_sound` (arena indexing).
fn start_sound(
    &mut self,
    sound_handle: SoundHandle,
    settings: &swf::SoundInfo,
) -> Result<SoundInstanceHandle, Error> {
    let sound = &self.sounds[sound_handle];
    // Cheap clone of the shared data; the cursor owns its own read position.
    let data = Cursor::new(VecAsRef(Arc::clone(&sound.data)));
    // Create a signal that decodes and resamples the sound.
    // The fast path applies only when no looping/seek/envelope behavior is
    // needed (num_loops <= 1 means "play once").
    let signal = if sound.skip_sample_frames == 0
        && settings.in_sample.is_none()
        && settings.out_sample.is_none()
        && settings.num_loops <= 1
        && settings.envelope.is_none()
    {
        // For simple event sounds, just use the same signal as streams.
        self.make_signal_from_simple_event_sound(&sound.format, data)?
    } else {
        // For event sounds with envelopes/other properties, wrap it in `EventSoundSignal`.
        self.make_signal_from_event_sound(&sound, settings, data)?
    };

    // Add sound instance to active list.
    let mut sound_instances = self.sound_instances.lock().unwrap();
    let handle = sound_instances.insert(SoundInstance {
        handle: Some(sound_handle),
        clip_id: None,
        signal,
        active: true,
    });
    Ok(handle)
}
|
|
|
|
|
|
|
|
/// Stops a single playing sound instance, dropping it from the active set.
fn stop_sound(&mut self, sound: SoundInstanceHandle) {
    self.sound_instances.lock().unwrap().remove(sound);
}
|
|
|
|
|
2019-09-19 07:21:22 +00:00
|
|
|
fn stop_all_sounds(&mut self) {
|
|
|
|
let mut sound_instances = self.sound_instances.lock().unwrap();
|
|
|
|
sound_instances.clear();
|
|
|
|
}
|
|
|
|
|
2019-09-27 17:23:53 +00:00
|
|
|
/// Stops all playing instances of one registered sound.
///
/// Stream sounds (whose `handle` is `None`) are never affected.
fn stop_sounds_with_handle(&mut self, handle: SoundHandle) {
    let target = Some(handle);
    self.sound_instances
        .lock()
        .unwrap()
        .retain(|_, instance| instance.handle != target);
}
|
|
|
|
|
2020-01-03 02:23:58 +00:00
|
|
|
/// Returns the duration of a registered sound in milliseconds, or `None` if
/// the handle is unknown or the duration cannot be computed.
fn get_sound_duration(&self, sound: SoundHandle) -> Option<u32> {
    let sound = self.sounds.get(sound)?;
    // AS duration does not subtract skip_sample_frames.
    let num_sample_frames = u64::from(sound.num_sample_frames);
    // `checked_div` avoids a divide-by-zero panic if a malformed SWF
    // declares a sample rate of 0 (the old code divided unconditionally).
    let ms = (num_sample_frames * 1000).checked_div(u64::from(sound.format.sample_rate))?;
    Some(ms as u32)
}
|
|
|
|
|
2019-09-27 17:23:53 +00:00
|
|
|
/// Reports whether any active instance of the given registered sound is
/// currently playing.
fn is_sound_playing_with_handle(&mut self, handle: SoundHandle) -> bool {
    let wanted = Some(handle);
    self.sound_instances
        .lock()
        .unwrap()
        .iter()
        .any(|(_, instance)| instance.active && instance.handle == wanted)
}
|
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
/// No-op: mixing happens on the cpal callback thread, so there is nothing
/// to do on the main-loop tick.
fn tick(&mut self) {}
|
|
|
|
}
|
2019-08-26 23:38:37 +00:00
|
|
|
|
2019-09-19 05:52:24 +00:00
|
|
|
/// A dummy wrapper struct to implement `AsRef<[u8]>` for `Arc<Vec<u8>>`.
/// Not having this trait causes problems when trying to use `Cursor<Vec<u8>>`.
struct VecAsRef(Arc<Vec<u8>>);
|
2019-08-26 23:38:37 +00:00
|
|
|
|
2019-09-19 00:55:07 +00:00
|
|
|
impl AsRef<[u8]> for VecAsRef {
|
2019-08-26 23:38:37 +00:00
|
|
|
#[inline]
|
2019-09-19 00:55:07 +00:00
|
|
|
fn as_ref(&self) -> &[u8] {
|
|
|
|
&self.0
|
2019-08-26 23:38:37 +00:00
|
|
|
}
|
|
|
|
}
|
2019-09-19 05:52:24 +00:00
|
|
|
|
|
|
|
impl Default for VecAsRef {
|
|
|
|
fn default() -> Self {
|
|
|
|
VecAsRef(Arc::new(vec![]))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A signal for event sound instances using sound settings (looping, start/end point, envelope).
struct EventSoundSignal {
    /// Seekable decoder for the sound data; seeking is needed to restart loops.
    decoder: Box<dyn SeekableDecoder + Send>,
    /// Remaining loop count (decremented each time a loop starts).
    num_loops: u16,
    /// Volume/pan envelope, if the sound settings specified one.
    envelope_signal: Option<EnvelopeSignal>,
    /// First sample frame of each loop (in-point plus encoder-delay skip).
    start_sample_frame: u32,
    /// Sample frame at which the current loop ends.
    end_sample_frame: Option<u32>,
    /// Current playback position in sample frames.
    cur_sample_frame: u32,
    /// True once all loops have completed; the signal then yields silence.
    is_exhausted: bool,
}
|
|
|
|
|
|
|
|
impl EventSoundSignal {
    /// Builds an event-sound signal from a seekable decoder and the
    /// `StartSound` playback settings.
    ///
    /// `in_sample`/`out_sample` in `settings` are expressed at 44100 Hz and
    /// are scaled down to the decoder's native rate via `sample_divisor`.
    /// NOTE(review): assumes `decoder.sample_rate()` is a nonzero divisor of
    /// 44100 (the standard SWF rates: 5512/11025/22050/44100) — a zero rate
    /// would panic here; confirm upstream validation.
    fn new_with_settings(
        decoder: Box<dyn SeekableDecoder + Send>,
        settings: &swf::SoundInfo,
        num_sample_frames: u32,
        skip_sample_frames: u16,
    ) -> Self {
        let skip_sample_frames = u32::from(skip_sample_frames);
        let sample_divisor = 44100 / u32::from(decoder.sample_rate());
        // Both endpoints are offset by the encoder-delay skip so they index
        // into the raw decoded stream.
        let start_sample_frame =
            settings.in_sample.unwrap_or(0) / sample_divisor + skip_sample_frames;
        let end_sample_frame = settings
            .out_sample
            .map(|n| n / sample_divisor)
            .unwrap_or(num_sample_frames)
            + skip_sample_frames;

        let envelope_signal = if let Some(envelope) = &settings.envelope {
            Some(EnvelopeSignal::new(envelope.clone()))
        } else {
            None
        };

        let mut signal = Self {
            decoder,
            num_loops: settings.num_loops,
            envelope_signal,
            start_sample_frame,
            end_sample_frame: Some(end_sample_frame),
            cur_sample_frame: start_sample_frame,
            is_exhausted: false,
        };
        // Consume the first loop count and seek the decoder to the in-point.
        signal.next_loop();
        signal
    }
}
|
|
|
|
|
|
|
|
impl EventSoundSignal {
|
|
|
|
/// Resets the decoder to the start point of the loop.
|
|
|
|
fn next_loop(&mut self) {
|
|
|
|
if self.num_loops > 0 {
|
|
|
|
self.num_loops -= 1;
|
|
|
|
self.decoder.seek_to_sample_frame(self.start_sample_frame);
|
|
|
|
self.cur_sample_frame = self.start_sample_frame;
|
|
|
|
} else {
|
|
|
|
self.is_exhausted = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl sample::signal::Signal for EventSoundSignal {
    type Frame = [i16; 2];

    /// Yields the next stereo frame, handling loop restarts and applying the
    /// volume envelope; yields silence once all loops are exhausted.
    fn next(&mut self) -> Self::Frame {
        // Loop the sound if necessary, and get the next frame.
        if !self.is_exhausted {
            let frame = if let Some(frame) = self.decoder.next() {
                self.cur_sample_frame += 1;
                // Past the loop's out-point: rewind (or exhaust) for the
                // next call.
                if let Some(end) = self.end_sample_frame {
                    if self.cur_sample_frame > end {
                        self.next_loop();
                    }
                }
                frame
            } else {
                // Decoder ran dry: restart the loop and retry. The recursion
                // terminates because `next_loop` either rewinds the decoder
                // or sets `is_exhausted`.
                self.next_loop();
                self.next()
            };
            // Scale the frame by the envelope's current volume, if any.
            if let Some(envelope) = &mut self.envelope_signal {
                use sample::frame::Frame;
                frame.mul_amp(envelope.next())
            } else {
                frame
            }
        } else {
            // Exhausted signals output silence.
            [0, 0]
        }
    }

    fn is_exhausted(&self) -> bool {
        self.is_exhausted
    }
}
|
2019-10-30 20:12:05 +00:00
|
|
|
|
|
|
|
/// A signal that represents the sound envelope for an event sound.
/// The sound signal gets multiplied by the envelope for volume/panning effects.
struct EnvelopeSignal {
    /// Iterator through the envelope points specified in the SWF file.
    envelope: std::vec::IntoIter<swf::SoundEnvelopePoint>,

    /// The starting envelope point.
    prev_point: swf::SoundEnvelopePoint,

    /// The ending envelope point.
    next_point: swf::SoundEnvelopePoint,

    /// The current sample index.
    cur_sample: u32,
}
|
|
|
|
|
|
|
|
impl EnvelopeSignal {
    /// Builds an envelope signal from the SWF envelope point list.
    ///
    /// An empty envelope degenerates to constant full volume (1.0/1.0).
    fn new(envelope: swf::SoundEnvelope) -> Self {
        // TODO: This maybe can be done more clever using the `sample` crate.
        let mut envelope = envelope.into_iter();
        let first_point = envelope.next().unwrap_or_else(|| swf::SoundEnvelopePoint {
            sample: 0,
            left_volume: 1.0,
            right_volume: 1.0,
        });
        Self {
            // The initial volume is the first point's volume.
            // (A synthetic point at sample 0 so interpolation starts flat.)
            prev_point: swf::SoundEnvelopePoint {
                sample: 0,
                left_volume: first_point.left_volume,
                right_volume: first_point.right_volume,
            },
            next_point: first_point,
            cur_sample: 0,
            envelope,
        }
    }
}
|
|
|
|
impl sample::signal::Signal for EnvelopeSignal {
|
|
|
|
type Frame = [f32; 2];
|
|
|
|
|
|
|
|
fn next(&mut self) -> Self::Frame {
|
|
|
|
// Calculate interpolated volume.
|
|
|
|
let out = if self.prev_point.sample < self.next_point.sample {
|
|
|
|
let a = f64::from(self.cur_sample - self.prev_point.sample);
|
|
|
|
let b = f64::from(self.next_point.sample - self.prev_point.sample);
|
|
|
|
let lerp = a / b;
|
|
|
|
let interpolator = sample::interpolate::Linear::new(
|
|
|
|
[self.prev_point.left_volume, self.prev_point.right_volume],
|
|
|
|
[self.next_point.left_volume, self.next_point.right_volume],
|
|
|
|
);
|
|
|
|
use sample::interpolate::Interpolator;
|
|
|
|
interpolator.interpolate(lerp)
|
|
|
|
} else {
|
|
|
|
[self.next_point.left_volume, self.next_point.right_volume]
|
|
|
|
};
|
|
|
|
|
|
|
|
// Update envelope endpoints.
|
|
|
|
self.cur_sample = self.cur_sample.saturating_add(1);
|
|
|
|
while self.cur_sample > self.next_point.sample {
|
|
|
|
self.prev_point = self.next_point.clone();
|
|
|
|
self.next_point =
|
|
|
|
self.envelope
|
|
|
|
.next()
|
|
|
|
.clone()
|
|
|
|
.unwrap_or_else(|| swf::SoundEnvelopePoint {
|
|
|
|
sample: std::u32::MAX,
|
|
|
|
left_volume: self.prev_point.left_volume,
|
|
|
|
right_volume: self.prev_point.right_volume,
|
|
|
|
});
|
|
|
|
|
|
|
|
if self.prev_point.sample > self.next_point.sample {
|
|
|
|
self.next_point.sample = self.prev_point.sample;
|
|
|
|
log::error!("Invalid sound envelope; sample indices are out of order");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out
|
|
|
|
}
|
|
|
|
|
|
|
|
fn is_exhausted(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|