audio: Implement `start_substream`.
This also removes `append_substream` since `Substream` now handles that internally. This requires duplicating a huge chunk of the decoder code to work with `Buffer` types instead of `SwfMovie` types. Eventually we'll want to wipe out all the SWF-specific stuff and use `Buffer` exclusively.
This commit is contained in:
parent
21cedcee2e
commit
4b5398e122
|
@ -84,25 +84,16 @@ pub trait AudioBackend: Downcast {
|
|||
/// seek offset data at the head of each chunk (SWF19 p.184). This is used
|
||||
/// to allow MP3 data to be buffered across multiple chunks or frames as
|
||||
/// necessary for playback.
|
||||
///
|
||||
/// The sound instance constructed by this function explicitly supports
|
||||
/// streaming additional data to it by appending chunks to the underlying
|
||||
/// `Substream`, if the sound is still playing.
|
||||
fn start_substream(
|
||||
&mut self,
|
||||
stream_data: Substream,
|
||||
handle: &swf::SoundStreamHead,
|
||||
stream_info: &swf::SoundStreamHead,
|
||||
) -> Result<SoundInstanceHandle, DecodeError>;
|
||||
|
||||
/// Adds data to an already playing stream sound started with
/// `start_substream`.
///
/// If a sound instance has not yet finished playing, then it will act as
/// if the appended data was originally provided at the time of the
/// `start_substream` call. Otherwise, the append operation will fail and
/// the caller will need to start another substream sound.
///
/// Failure is reported through the returned `DecodeError`.
fn append_substream(
    &mut self,
    instance: SoundInstanceHandle,
    addl_data: Substream,
) -> Result<(), DecodeError>;
|
||||
|
||||
/// Stops a playing sound instance.
///
/// No-op if the sound is not playing.
fn stop_sound(&mut self, sound: SoundInstanceHandle);
|
||||
|
@ -164,6 +155,11 @@ pub trait AudioBackend: Downcast {
|
|||
|
||||
/// Returns the last whole window of output samples.
///
/// 1024 two-channel sample frames are returned — presumably
/// `[left, right]` pairs; confirm against the mixer's output layout.
fn get_sample_history(&self) -> [[f32; 2]; 1024];
|
||||
|
||||
/// Determine if a sound is still playing.
///
/// Default implementation: a sound counts as playing exactly when
/// `get_sound_duration` returns `Some` for it.
fn is_sound_playing(&self, instance: SoundInstanceHandle) -> bool {
    self.get_sound_duration(instance).is_some()
}
|
||||
}
|
||||
|
||||
impl_downcast!(AudioBackend);
|
||||
|
@ -257,14 +253,6 @@ impl AudioBackend for NullAudioBackend {
|
|||
Ok(SoundInstanceHandle::from_raw_parts(0, 0))
|
||||
}
|
||||
|
||||
/// Null implementation: accepts and discards the appended data,
/// always reporting success.
fn append_substream(
    &mut self,
    _instance: SoundInstanceHandle,
    _addl_data: Substream,
) -> Result<(), DecodeError> {
    Ok(())
}
|
||||
|
||||
/// No-op: the null backend plays nothing, so there is nothing to stop.
fn stop_sound(&mut self, _sound: SoundInstanceHandle) {}
|
||||
|
||||
/// No-op: the null backend tracks no playing sounds.
fn stop_all_sounds(&mut self) {}
|
||||
|
|
|
@ -14,6 +14,7 @@ pub use mp3::{mp3_metadata, Mp3Decoder};
|
|||
pub use nellymoser::NellymoserDecoder;
|
||||
pub use pcm::PcmDecoder;
|
||||
|
||||
use crate::buffer::{Slice, SliceCursor, Substream, SubstreamChunksIter};
|
||||
use crate::tag_utils::{ControlFlow, SwfSlice};
|
||||
use std::io::{Cursor, Read};
|
||||
use swf::{AudioCompression, SoundFormat, TagCode};
|
||||
|
@ -235,9 +236,6 @@ pub trait SeekableDecoder: Decoder {
|
|||
}
|
||||
}
|
||||
|
||||
/// `StreamTagReader` reads through the SWF tag data of a `MovieClip`, extracting
/// audio data from the `SoundStreamBlock` tags. It can be used as an `Iterator` that
/// will return consecutive slices of the underlying audio data.
|
||||
|
@ -370,3 +368,224 @@ pub struct Mp3Metadata {
|
|||
pub sample_rate: u16,
|
||||
pub num_sample_frames: u32,
|
||||
}
|
||||
|
||||
/// `SubstreamTagReader` reads through the data chunks of a `Substream` with
/// audio data in it. Data is assumed to have already been extracted from its
/// container.
///
/// MP3 data will be further joined into properly-sized chunks.
struct SubstreamTagReader {
    /// Iterator over the data chunks of the `Substream` holding the audio.
    data_stream: SubstreamChunksIter,

    /// The compressed audio data in the most recent chunk we've seen, returned by `Iterator::next`.
    current_audio_data: Option<Slice>,

    /// The compression used by the audio data.
    compression: AudioCompression,

    /// The number of audio samples for use in future animation frames.
    ///
    /// Only used in MP3 encoding to properly handle gaps in the audio track.
    mp3_samples_buffered: i32,

    /// The ideal number of audio samples in each animation frame, i.e. the sample rate divided by frame rate.
    ///
    /// Only used in MP3 encoding to properly handle gaps in the audio track.
    /// Currently unused (kept for the per-frame tracking TODO in `next`).
    _mp3_samples_per_block: u16,
}
|
||||
|
||||
impl SubstreamTagReader {
|
||||
/// Builds a new `SubstreamTagReader` from the given `Substream`.
|
||||
///
|
||||
/// `Substream` should reference audio data.
|
||||
fn new(stream_info: &swf::SoundStreamHead, data_stream: Substream) -> Self {
|
||||
Self {
|
||||
data_stream: data_stream.iter_chunks(),
|
||||
compression: stream_info.stream_format.compression,
|
||||
current_audio_data: None,
|
||||
mp3_samples_buffered: 0,
|
||||
_mp3_samples_per_block: stream_info.num_samples_per_block,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for SubstreamTagReader {
    type Item = Slice;

    /// Yields the audio payload of the next chunk, or `None` once the
    /// substream's chunks are exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        let compression = self.compression;
        let next_chunk = self.data_stream.next()?;
        let data = next_chunk.data();

        let mut audio_block = &data[..];
        if compression == AudioCompression::Mp3 && next_chunk.len() >= 4 {
            // Each MP3 chunk begins with a 4-byte header: a little-endian
            // sample count in the first two bytes, followed by seek offset
            // data (SWF19 p.184). The header is stripped off below before
            // the payload is handed out.
            //
            // MP3s deliver audio in frames of 576 samples, which means we may have SoundStreamBlocks with
            // lots of extra samples, followed by a block with 0 samples. Worse, there may be frames without
            // blocks at all despite SWF19 saying this shouldn't happen. This may or may not indicate a gap
            // in the audio depending on the number of empty frames.
            // Keep a tally of the # of samples we've seen compared to the number of samples that will be
            // played in each timeline frame. Only stop an MP3 sound if we've exhausted all of the samples.
            // RESEARCHME: How does Flash Player actually determine when there is an audio gap or not?
            // If an MP3 audio track has gaps, Flash Player will often play it out of sync (too early).
            // Seems closely related to `stream_info.num_samples_per_block`.
            let num_samples =
                u16::from_le_bytes(audio_block[..2].try_into().expect("2 bytes fit into a u16"));
            self.mp3_samples_buffered += i32::from(num_samples);
            audio_block = &audio_block[4..];
        }
        // Remember the payload so `Read::read` can drain it incrementally.
        self.current_audio_data = Some(next_chunk.to_subslice(audio_block));

        // TODO: per-frame sample tracking. `Substream` does not have a notion
        // of a 'frame'. However, SWF audio streams end if there's no audio
        // data in a frame, EXCEPT for MP3 which is allowed to straddle frames,
        // have empty frames, etc. Non-MP3 stops at the first chunk.
        self.current_audio_data.clone()
    }
}
|
||||
|
||||
/// Reads audio data out of the current chunk, advancing to the next chunk
/// (via `Iterator::next`) once the current one is exhausted.
impl Read for SubstreamTagReader {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        if self.current_audio_data.is_none() && self.next().is_none() {
            //next() fills current_audio_data; both empty means end of stream
            return Ok(0);
        }

        let chunk = self.current_audio_data.as_mut().unwrap();
        let data = chunk.data();

        //At this point, current_audio_data should be full
        let len = std::cmp::min(buf.len(), data.len());
        buf[..len].copy_from_slice(&data[..len]);

        // `data` borrows `chunk`; release the borrow before reassigning it.
        drop(data);
        // Trim the bytes just consumed off the front of the chunk.
        *chunk = chunk.to_start_and_end(chunk.start() + len, chunk.end());

        if chunk.is_empty() {
            self.current_audio_data = None;
        }
        Ok(len)
    }
}
|
||||
|
||||
/// Create a new decoder that reads data from a shared `Substream` instance.
|
||||
/// This works similarly to `make_stream_decoder` but using the new buffer
|
||||
/// infrastructure that will eventually replace SWF-specific streaming.
|
||||
///
|
||||
/// The substream is shared in order to allow appending additional data into
|
||||
/// the stream.
|
||||
pub fn make_substream_decoder(
|
||||
stream_info: &swf::SoundStreamHead,
|
||||
data_stream: Substream,
|
||||
) -> Result<Box<dyn Decoder + Send>, Error> {
|
||||
let decoder: Box<dyn Decoder + Send> =
|
||||
if stream_info.stream_format.compression == AudioCompression::Adpcm {
|
||||
Box::new(AdpcmSubstreamDecoder::new(stream_info, data_stream)?)
|
||||
} else {
|
||||
Box::new(StandardSubstreamDecoder::new(stream_info, data_stream)?)
|
||||
};
|
||||
Ok(decoder)
|
||||
}
|
||||
|
||||
/// The `StandardSubstreamDecoder` takes care of reading the audio data from
/// the chunks of a `Substream` and feeding it to the decoder.
struct StandardSubstreamDecoder {
    /// The underlying decoder. The decoder will get its data from a `Substream`
    /// (via a `SubstreamTagReader` wrapped by `make_decoder`).
    decoder: Box<dyn Decoder>,
}
|
||||
|
||||
impl StandardSubstreamDecoder {
    /// Constructs a new `StandardSubstreamDecoder`.
    ///
    /// `data_stream` should be a `Substream` containing the raw audio data
    /// described by `stream_info`.
    fn new(stream_info: &swf::SoundStreamHead, data_stream: Substream) -> Result<Self, Error> {
        // Create a tag reader to pull audio data out of the substream chunks.
        let tag_reader = SubstreamTagReader::new(stream_info, data_stream);
        // Wrap the tag reader in the decoder.
        let decoder = make_decoder(&stream_info.stream_format, tag_reader)?;
        Ok(Self { decoder })
    }
}
|
||||
|
||||
impl Decoder for StandardSubstreamDecoder {
    // Both properties delegate to the wrapped decoder.
    fn num_channels(&self) -> u8 {
        self.decoder.num_channels()
    }
    fn sample_rate(&self) -> u16 {
        self.decoder.sample_rate()
    }
}
|
||||
|
||||
impl Iterator for StandardSubstreamDecoder {
    type Item = [i16; 2];

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Pull the next two-channel sample frame straight from the inner decoder.
        self.decoder.next()
    }
}
|
||||
|
||||
/// Stream sounds encoded with ADPCM have an ADPCM header in each `SoundStreamBlock` tag, unlike
/// other compression formats that remain the same as if they were a single sound clip.
/// Therefore, we must recreate the decoder with each `SoundStreamBlock` to parse the additional
/// headers.
pub struct AdpcmSubstreamDecoder {
    /// Sound format parameters, kept so the decoder can be rebuilt per chunk.
    format: SoundFormat,
    /// Source of audio-data chunks.
    tag_reader: SubstreamTagReader,
    /// Decoder for the chunk currently being consumed.
    decoder: AdpcmDecoder<SliceCursor>,
}
|
||||
|
||||
impl AdpcmSubstreamDecoder {
    /// Constructs a decoder over the first chunk of `data_stream`.
    ///
    /// If the substream has no chunks yet, the decoder is primed with an
    /// empty slice instead.
    fn new(stream_info: &swf::SoundStreamHead, data_stream: Substream) -> Result<Self, Error> {
        // Fallback so an empty substream still yields a valid decoder.
        let empty_buffer = data_stream.buffer().to_empty_slice();
        let mut tag_reader = SubstreamTagReader::new(stream_info, data_stream);
        let audio_data = tag_reader.next().unwrap_or(empty_buffer);
        // `AdpcmDecoder::new` reads the per-chunk ADPCM header up front.
        let decoder = AdpcmDecoder::new(
            audio_data.as_cursor(),
            stream_info.stream_format.is_stereo,
            stream_info.stream_format.sample_rate,
        )?;
        Ok(Self {
            // Keep the format so `Iterator::next` can rebuild the decoder
            // for each subsequent chunk.
            format: stream_info.stream_format.clone(),
            tag_reader,
            decoder,
        })
    }
}
|
||||
|
||||
impl Decoder for AdpcmSubstreamDecoder {
    // Both properties delegate to the current chunk's decoder.
    fn num_channels(&self) -> u8 {
        self.decoder.num_channels()
    }
    fn sample_rate(&self) -> u16 {
        self.decoder.sample_rate()
    }
}
|
||||
|
||||
impl Iterator for AdpcmSubstreamDecoder {
    type Item = [i16; 2];

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(sample_frame) = self.decoder.next() {
            // Return sample frames until the decoder has exhausted
            // the current chunk.
            Some(sample_frame)
        } else if let Some(audio_data) = self.tag_reader.next() {
            // We've reached the end of the current chunk, so
            // read the next one and recreate the decoder.
            // `AdpcmDecoder` reads the ADPCM header when it is created.
            self.decoder = AdpcmDecoder::new(
                audio_data.as_cursor(),
                self.format.is_stereo,
                self.format.sample_rate,
            )
            .ok()?;
            self.decoder.next()
        } else {
            // No more chunks; the stream is finished.
            None
        }
    }
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
use super::decoders::{self, AdpcmDecoder, Decoder, PcmDecoder, SeekableDecoder};
|
||||
use super::{SoundHandle, SoundInstanceHandle, SoundTransform};
|
||||
use crate::backend::audio::{DecodeError, RegisterError};
|
||||
use crate::buffer::Substream;
|
||||
use crate::tag_utils::SwfSlice;
|
||||
use generational_arena::Arena;
|
||||
use std::io::Cursor;
|
||||
|
@ -211,6 +212,8 @@ impl SoundInstance {
|
|||
}
|
||||
|
||||
/// Creates a new `SoundInstance` from a `Stream`, for stream sounds.
|
||||
///
|
||||
/// Substream-backed sounds also use this.
|
||||
fn new_stream(stream: Box<dyn Stream>) -> Self {
|
||||
SoundInstance {
|
||||
handle: None,
|
||||
|
@ -407,6 +410,20 @@ impl AudioMixer {
|
|||
Ok(stream)
|
||||
}
|
||||
|
||||
fn make_stream_from_buffer_substream(
|
||||
&self,
|
||||
stream_info: &swf::SoundStreamHead,
|
||||
data_stream: Substream,
|
||||
) -> Result<Box<dyn Stream>, DecodeError> {
|
||||
// Instantiate a decoder for the compression that the sound data uses.
|
||||
let clip_stream_decoder = decoders::make_substream_decoder(stream_info, data_stream)?;
|
||||
|
||||
// Convert the `Decoder` to a `Stream`, and resample it to the output sample rate.
|
||||
let stream = DecoderStream::new(clip_stream_decoder);
|
||||
let stream = Box::new(self.make_resampler(stream));
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
/// Callback to the audio thread.
|
||||
/// Refill the output buffer by stepping through all active sounds
|
||||
/// and mixing in their output.
|
||||
|
@ -587,6 +604,25 @@ impl AudioMixer {
|
|||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Starts a `Substream` backed audio stream.
|
||||
pub fn start_substream(
|
||||
&mut self,
|
||||
stream_data: Substream,
|
||||
stream_info: &swf::SoundStreamHead,
|
||||
) -> Result<SoundInstanceHandle, DecodeError> {
|
||||
// The audio data for substream sounds is already de-multiplexed by the
|
||||
// caller. The substream tag reader will feed the decoder audio data
|
||||
// from each chunk.
|
||||
let stream = self.make_stream_from_buffer_substream(stream_info, stream_data)?;
|
||||
|
||||
let mut sound_instances = self
|
||||
.sound_instances
|
||||
.lock()
|
||||
.expect("Cannot be called reentrant");
|
||||
let handle = sound_instances.insert(SoundInstance::new_stream(stream));
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Stops a playing sound instance.
|
||||
pub fn stop_sound(&mut self, sound: SoundInstanceHandle) {
|
||||
let mut sound_instances = self
|
||||
|
@ -1071,6 +1107,15 @@ macro_rules! impl_audio_mixer_backend {
|
|||
self.$mixer.start_sound(sound_handle, settings)
|
||||
}
|
||||
|
||||
#[inline]
// Trait-method shim generated by `impl_audio_mixer_backend!`: forwards
// straight to the wrapped `AudioMixer` field named by `$mixer`.
fn start_substream(
    &mut self,
    stream_data: ruffle_core::buffer::Substream,
    stream_info: &swf::SoundStreamHead,
) -> Result<SoundInstanceHandle, DecodeError> {
    self.$mixer.start_substream(stream_data, stream_info)
}
|
||||
|
||||
#[inline]
|
||||
fn stop_sound(&mut self, sound: SoundInstanceHandle) {
|
||||
self.$mixer.stop_sound(sound)
|
||||
|
|
Loading…
Reference in New Issue