Rework audio to allow for streaming decoding

This commit is contained in:
Mike Welsh 2019-07-25 00:58:34 -07:00
parent 725cdae6c5
commit e64e306137
16 changed files with 1003 additions and 677 deletions

View File

@ -11,6 +11,8 @@ gc-arena = "0.1.1"
gc-arena-derive = "0.1.1"
generational-arena = "0.2.2"
log = "0.4"
minimp3 = { version = "0.3.3", optional = true }
puremp3 = { version = "0.1", optional = true }
rand = "0.6.5"
swf = { git = "https://github.com/Herschel/swf-rs", rev = "44c9262" }

View File

@ -1,6 +1,4 @@
use crate::backend::audio::AudioBackend;
//use generational_arena::Arena;
use swf::SoundStreamHead;
pub struct Audio {
backend: Box<AudioBackend>,
@ -20,31 +18,40 @@ impl Audio {
self.backend.register_sound(sound)
}
pub fn register_stream(&mut self, stream_info: &SoundStreamHead) -> AudioStreamHandle {
self.backend.register_stream(stream_info)
}
pub fn play_sound(&mut self, sound: SoundHandle) {
self.backend.play_sound(sound)
}
pub fn preload_stream_samples(&mut self, handle: AudioStreamHandle, samples: &[u8]) {
self.backend.preload_stream_samples(handle, samples)
pub fn preload_sound_stream_head(
&mut self,
clip_id: swf::CharacterId,
stream_info: &swf::SoundStreamHead,
) {
self.backend.preload_sound_stream_head(clip_id, stream_info)
}
pub fn preload_stream_finalize(&mut self, handle: AudioStreamHandle) {
self.backend.preload_stream_finalize(handle)
pub fn preload_sound_stream_block(&mut self, clip_id: swf::CharacterId, audio_data: &[u8]) {
self.backend.preload_sound_stream_block(clip_id, audio_data);
}
pub fn start_stream(&mut self, handle: AudioStreamHandle) -> bool {
self.backend.start_stream(handle)
pub fn preload_sound_stream_end(&mut self, clip_id: swf::CharacterId) {
self.backend.preload_sound_stream_end(clip_id);
}
pub fn queue_stream_samples(&mut self, handle: AudioStreamHandle, samples: &[u8]) {
self.backend.queue_stream_samples(handle, samples)
pub fn start_stream(
&mut self,
clip_id: crate::prelude::CharacterId,
clip_data: crate::tag_utils::SwfSlice,
handle: &swf::SoundStreamHead,
) -> AudioStreamHandle {
self.backend.start_stream(clip_id, clip_data, handle)
}
pub fn stop_all_sounds(&mut self) {
// TODO(Herschel)
}
pub fn is_loading_complete(&self) -> bool {
self.backend.is_loading_complete()
}
}

View File

@ -1,7 +1,6 @@
use bitstream_io::{BigEndian, BitReader};
use generational_arena::{Arena, Index};
use std::io::Read;
pub mod decoders;
pub mod swf {
pub use swf::{read, AudioCompression, CharacterId, Sound, SoundFormat, SoundStreamHead};
}
@ -13,14 +12,25 @@ type Error = Box<std::error::Error>;
pub trait AudioBackend {
fn register_sound(&mut self, swf_sound: &swf::Sound) -> Result<SoundHandle, Error>;
fn register_stream(&mut self, stream_info: &swf::SoundStreamHead) -> AudioStreamHandle;
fn play_sound(&mut self, sound: SoundHandle);
fn preload_stream_samples(&mut self, _handle: AudioStreamHandle, _samples: &[u8]) {}
fn preload_stream_finalize(&mut self, _handle: AudioStreamHandle) {}
fn start_stream(&mut self, _handle: AudioStreamHandle) -> bool {
false
fn preload_sound_stream_head(
&mut self,
_clip_id: swf::CharacterId,
_stream_info: &swf::SoundStreamHead,
) {
}
fn preload_sound_stream_block(&mut self, _clip_id: swf::CharacterId, _audio_data: &[u8]) {}
fn preload_sound_stream_end(&mut self, _clip_id: swf::CharacterId) {}
fn play_sound(&mut self, sound: SoundHandle);
fn start_stream(
&mut self,
clip_id: crate::prelude::CharacterId,
clip_data: crate::tag_utils::SwfSlice,
handle: &swf::SoundStreamHead,
) -> AudioStreamHandle;
// TODO: Eventually remove this/move it to library.
fn is_loading_complete(&self) -> bool {
true
}
fn queue_stream_samples(&mut self, handle: AudioStreamHandle, samples: &[u8]);
fn tick(&mut self) {}
}
@ -45,13 +55,14 @@ impl AudioBackend for NullAudioBackend {
fn play_sound(&mut self, _sound: SoundHandle) {}
fn register_stream(&mut self, _stream_info: &swf::SoundStreamHead) -> AudioStreamHandle {
fn start_stream(
&mut self,
_clip_id: crate::prelude::CharacterId,
_clip_data: crate::tag_utils::SwfSlice,
_handle: &swf::SoundStreamHead,
) -> AudioStreamHandle {
self.streams.insert(())
}
fn queue_stream_samples(&mut self, _handle: AudioStreamHandle, _samples: &[u8]) {
// Noop
}
}
impl Default for NullAudioBackend {
@ -59,134 +70,3 @@ impl Default for NullAudioBackend {
NullAudioBackend::new()
}
}
/// Decoder for SWF ADPCM-compressed audio data (mono or stereo).
/// (Removed in this commit — superseded by `decoders::AdpcmDecoder`.)
pub struct AdpcmDecoder<R: Read> {
inner: BitReader<R, BigEndian>,
is_stereo: bool,
// Bits per encoded delta (2..=5), read from the 2-bit stream header.
bits_per_sample: usize,
// Sample index within the current block; 0 triggers a block-header read.
sample_num: u16,
left_sample: i32,
left_step_index: i16,
left_step: i32,
right_sample: i32,
right_step_index: i16,
right_step: i32,
}
impl<R: Read> AdpcmDecoder<R> {
// Step-index adjustment tables, one per bits-per-sample (2..=5),
// indexed by the delta's magnitude bits.
const INDEX_TABLE: [&'static [i16]; 4] = [
&[-1, 2],
&[-1, -1, 2, 4],
&[-1, -1, -1, -1, 2, 4, 6, 8],
&[-1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16],
];
// Quantizer step sizes (IMA ADPCM-style table).
const STEP_TABLE: [i32; 89] = [
7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55, 60,
66, 73, 80, 88, 97, 107, 118, 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371,
408, 449, 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878,
2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845,
8630, 9493, 10442, 11487, 12635, 13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086,
29794, 32767,
];
/// Wraps `inner` and reads the stream-level bits-per-sample header.
pub fn new(inner: R, is_stereo: bool) -> Result<Self, Error> {
let mut reader = BitReader::new(inner);
let bits_per_sample = reader.read::<u8>(2)? as usize + 2;
let left_sample = 0;
let left_step_index = 0;
let left_step = 0;
let right_sample = 0;
let right_step_index = 0;
let right_step = 0;
Ok(Self {
inner: reader,
is_stereo,
bits_per_sample,
sample_num: 0,
left_sample,
left_step,
left_step_index,
right_sample,
right_step,
right_step_index,
})
}
/// Decodes the next (left, right) sample pair; for mono the left sample
/// is duplicated into both channels.
pub fn next_sample(&mut self) -> Result<(i16, i16), Error> {
if self.sample_num == 0 {
// The initial sample values are NOT byte-aligned.
self.left_sample = self.inner.read_signed(16)?;
self.left_step_index = self.inner.read::<u16>(6)? as i16;
self.left_step = Self::STEP_TABLE[self.left_step_index as usize];
if self.is_stereo {
self.right_sample = self.inner.read_signed(16)?;
self.right_step_index = self.inner.read::<u16>(6)? as i16;
self.right_step = Self::STEP_TABLE[self.right_step_index as usize];
}
}
// Wrap to re-read block headers periodically.
self.sample_num = (self.sample_num + 1) % 4095;
let data: i32 = self.inner.read::<u32>(self.bits_per_sample as u32)? as i32;
self.left_step = Self::STEP_TABLE[self.left_step_index as usize];
// (data + 0.5) * step / 2^(bits_per_sample - 2)
// Data is sign-magnitude, NOT two's complement.
// TODO(Herschel): Other implementations use some bit-tricks for this.
let sign_mask = 1 << (self.bits_per_sample - 1);
let magnitude = data & !sign_mask;
let delta = (2 * magnitude + 1) * self.left_step / sign_mask;
if (data & sign_mask) != 0 {
self.left_sample -= delta;
} else {
self.left_sample += delta;
}
if self.left_sample < -32768 {
// NOTE(review): this assigns +32768, not -32768. The final `as i16`
// cast wraps it to -32768, but the wrong i32 predictor value feeds
// the next delta computation — likely should be -32768.
self.left_sample = 32768;
} else if self.left_sample > 32767 {
self.left_sample = 32767;
}
let i = magnitude as usize;
self.left_step_index += Self::INDEX_TABLE[self.bits_per_sample - 2][i];
if self.left_step_index < 0 {
self.left_step_index = 0;
} else if self.left_step_index >= Self::STEP_TABLE.len() as i16 {
self.left_step_index = Self::STEP_TABLE.len() as i16 - 1;
}
if self.is_stereo {
// Same decode step for the right channel.
let data = self.inner.read::<u32>(self.bits_per_sample as u32)? as i32;
self.right_step = Self::STEP_TABLE[self.right_step_index as usize];
let sign_mask = 1 << (self.bits_per_sample - 1);
let magnitude = data & !sign_mask;
let delta = (2 * magnitude + 1) * self.right_step / sign_mask;
if (data & sign_mask) != 0 {
self.right_sample -= delta;
} else {
self.right_sample += delta;
}
if self.right_sample < -32768 {
// NOTE(review): same +32768 issue as the left channel above.
self.right_sample = 32768;
} else if self.right_sample > 32767 {
self.right_sample = 32767;
}
let i = magnitude as usize;
self.right_step_index += Self::INDEX_TABLE[self.bits_per_sample - 2][i];
if self.right_step_index < 0 {
self.right_step_index = 0;
} else if self.right_step_index >= Self::STEP_TABLE.len() as i16 {
self.right_step_index = Self::STEP_TABLE.len() as i16 - 1;
}
Ok((self.left_sample as i16, self.right_sample as i16))
} else {
Ok((self.left_sample as i16, self.left_sample as i16))
}
}
}

View File

@ -0,0 +1,76 @@
mod adpcm;
mod mp3;
pub use adpcm::AdpcmDecoder;
pub use mp3::Mp3Decoder;
/// Common interface for streaming audio decoders.
///
/// A `Decoder` yields interleaved signed 16-bit PCM samples via `Iterator`
/// and reports the stream's channel count and sample rate.
pub trait Decoder: Iterator<Item = i16> {
/// Number of output channels (1 = mono, 2 = stereo).
fn num_channels(&self) -> u8;
/// Output sample rate in Hz.
fn sample_rate(&self) -> u16;
}
/// Builds a `Read` adapter that lazily extracts the audio bytes from a
/// movie clip's `SoundStreamBlock` tags.
///
/// Each time the buffered block is exhausted, the underlying SWF tag stream
/// is advanced to the next `SoundStreamBlock` and its payload — minus a
/// 4-byte header (presumably the MP3 sample count and seek offset;
/// TODO confirm for non-MP3 stream formats) — becomes the new buffer.
pub fn stream_tag_reader(
swf_data: crate::tag_utils::SwfSlice,
) -> IterRead<impl Iterator<Item = u8>> {
use std::io::{Cursor, Read};
use swf::TagCode;
// NOTE(review): SWF version is hard-coded to 8 here — verify the real
// version from the header is not required by the tags being decoded.
let mut reader = swf::read::Reader::new(Cursor::new(swf_data), 8);
let mut audio_data = vec![];
let mut cur_byte = 0;
let mut frame = 1;
let iter = std::iter::from_fn(move || {
// Refill: decode tags until the next SoundStreamBlock is consumed.
if cur_byte >= audio_data.len() {
cur_byte = 0;
let tag_callback =
|reader: &mut swf::read::Reader<Cursor<crate::tag_utils::SwfSlice>>,
tag_code,
tag_len| match tag_code {
TagCode::ShowFrame => {
frame += 1;
Ok(())
}
TagCode::SoundStreamBlock => {
audio_data.clear();
let mut data = vec![];
reader
.get_mut()
.take(tag_len as u64)
.read_to_end(&mut data)?;
// Skip the 4-byte header at the start of the block payload.
audio_data.extend(data[4..].iter());
Ok(())
}
_ => Ok(()),
};
// Decoding stops as soon as a SoundStreamBlock has been read.
let _ =
crate::tag_utils::decode_tags(&mut reader, tag_callback, TagCode::SoundStreamBlock);
}
// Yield the next buffered byte, or end the stream when no block remains.
if cur_byte < audio_data.len() {
let byte = audio_data[cur_byte];
cur_byte += 1;
Some(byte)
} else {
None
}
});
IterRead(iter)
}
/// Adapter exposing a byte iterator through `std::io::Read`.
pub struct IterRead<I: Iterator<Item = u8>>(I);

impl<I: Iterator<Item = u8>> std::io::Read for IterRead<I> {
    /// Pulls bytes from the iterator until `buf` is full or the iterator is
    /// exhausted. Returns the number of bytes written; never errors.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let mut written = 0;
        for (slot, byte) in buf.iter_mut().zip(&mut self.0) {
            *slot = byte;
            written += 1;
        }
        Ok(written)
    }
}

View File

@ -0,0 +1,172 @@
use super::Decoder;
use bitstream_io::{BigEndian, BitReader};
use std::io::Read;
/// Decoder for SWF ADPCM-compressed audio data (mono or stereo).
///
/// Reads the 2-bit "bits per sample" header up front, then decodes
/// sign-magnitude deltas against an IMA-style step table. Interleaved
/// 16-bit PCM samples are emitted through the `Iterator` impl below.
pub struct AdpcmDecoder<R: Read> {
    inner: BitReader<R, BigEndian>,
    sample_rate: u16,
    is_stereo: bool,
    /// Bits per encoded delta (2..=5), read from the stream header.
    bits_per_sample: usize,
    /// Sample index within the current block; 0 triggers a block-header read.
    sample_num: u16,
    left_sample: i32,
    left_step_index: i16,
    left_step: i32,
    right_sample: i32,
    right_step_index: i16,
    right_step: i32,
    /// Next channel for the iterator to emit; starts at 2 so the first
    /// `next()` call decodes a fresh sample pair.
    cur_channel: u8,
}

impl<R: Read> AdpcmDecoder<R> {
    /// Step-index adjustments, one table per bits-per-sample (2..=5),
    /// indexed by the delta's magnitude bits.
    const INDEX_TABLE: [&'static [i16]; 4] = [
        &[-1, 2],
        &[-1, -1, 2, 4],
        &[-1, -1, -1, -1, 2, 4, 6, 8],
        &[-1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16],
    ];

    /// Quantizer step sizes (IMA ADPCM-style table).
    const STEP_TABLE: [i32; 89] = [
        7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55, 60,
        66, 73, 80, 88, 97, 107, 118, 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371,
        408, 449, 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878,
        2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845,
        8630, 9493, 10442, 11487, 12635, 13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086,
        29794, 32767,
    ];

    /// Wraps `inner` and reads the stream-level bits-per-sample header.
    pub fn new(inner: R, is_stereo: bool, sample_rate: u16) -> Result<Self, std::io::Error> {
        let mut reader = BitReader::new(inner);
        let bits_per_sample = reader.read::<u8>(2)? as usize + 2;
        Ok(Self {
            inner: reader,
            sample_rate,
            is_stereo,
            bits_per_sample,
            sample_num: 0,
            left_sample: 0,
            left_step: 0,
            left_step_index: 0,
            right_sample: 0,
            right_step: 0,
            right_step_index: 0,
            cur_channel: 2,
        })
    }

    /// Decodes the next sample for each channel into `left_sample` /
    /// `right_sample` and resets `cur_channel` so the iterator re-emits both.
    ///
    /// # Errors
    /// Returns an I/O error when the underlying reader is exhausted; the
    /// iterator treats this as end-of-stream.
    pub fn next_sample(&mut self) -> Result<(), std::io::Error> {
        self.cur_channel = 0;
        if self.sample_num == 0 {
            // The initial sample values are NOT byte-aligned.
            self.left_sample = self.inner.read_signed(16)?;
            self.left_step_index = self.inner.read::<u16>(6)? as i16;
            self.left_step = Self::STEP_TABLE[self.left_step_index as usize];
            if self.is_stereo {
                self.right_sample = self.inner.read_signed(16)?;
                self.right_step_index = self.inner.read::<u16>(6)? as i16;
                self.right_step = Self::STEP_TABLE[self.right_step_index as usize];
            }
        }
        // Wrap to re-read block headers periodically.
        // NOTE(review): assumes 4095 samples per block — confirm against the
        // SWF spec (block size may be 4096).
        self.sample_num = (self.sample_num + 1) % 4095;

        let data: i32 = self.inner.read::<u32>(self.bits_per_sample as u32)? as i32;
        self.left_step = Self::STEP_TABLE[self.left_step_index as usize];
        // (data + 0.5) * step / 2^(bits_per_sample - 2)
        // Data is sign-magnitude, NOT two's complement.
        // TODO(Herschel): Other implementations use some bit-tricks for this.
        let sign_mask = 1 << (self.bits_per_sample - 1);
        let magnitude = data & !sign_mask;
        let delta = (2 * magnitude + 1) * self.left_step / sign_mask;
        if (data & sign_mask) != 0 {
            self.left_sample -= delta;
        } else {
            self.left_sample += delta;
        }
        // Clamp the predictor to the 16-bit range. (Fixed: the underflow case
        // previously assigned +32768, which wrapped correctly in the final
        // `as i16` cast but poisoned subsequent delta calculations.)
        if self.left_sample < -32768 {
            self.left_sample = -32768;
        } else if self.left_sample > 32767 {
            self.left_sample = 32767;
        }
        let i = magnitude as usize;
        self.left_step_index += Self::INDEX_TABLE[self.bits_per_sample - 2][i];
        if self.left_step_index < 0 {
            self.left_step_index = 0;
        } else if self.left_step_index >= Self::STEP_TABLE.len() as i16 {
            self.left_step_index = Self::STEP_TABLE.len() as i16 - 1;
        }

        if self.is_stereo {
            // Same decode step for the right channel.
            let data = self.inner.read::<u32>(self.bits_per_sample as u32)? as i32;
            self.right_step = Self::STEP_TABLE[self.right_step_index as usize];
            let sign_mask = 1 << (self.bits_per_sample - 1);
            let magnitude = data & !sign_mask;
            let delta = (2 * magnitude + 1) * self.right_step / sign_mask;
            if (data & sign_mask) != 0 {
                self.right_sample -= delta;
            } else {
                self.right_sample += delta;
            }
            // Clamp (same fix as the left channel).
            if self.right_sample < -32768 {
                self.right_sample = -32768;
            } else if self.right_sample > 32767 {
                self.right_sample = 32767;
            }
            let i = magnitude as usize;
            self.right_step_index += Self::INDEX_TABLE[self.bits_per_sample - 2][i];
            if self.right_step_index < 0 {
                self.right_step_index = 0;
            } else if self.right_step_index >= Self::STEP_TABLE.len() as i16 {
                self.right_step_index = Self::STEP_TABLE.len() as i16 - 1;
            }
        }
        Ok(())
    }
}
impl<R: Read> Iterator for AdpcmDecoder<R> {
    type Item = i16;

    /// Emits interleaved samples (L, R, L, R, … when stereo), decoding a new
    /// sample pair whenever every channel of the current pair was consumed.
    fn next(&mut self) -> Option<i16> {
        let num_channels = if self.is_stereo { 2 } else { 1 };
        if self.cur_channel >= num_channels {
            // `next_sample` resets `cur_channel` to 0; an error (e.g. EOF)
            // ends the stream.
            self.next_sample().ok()?;
        }
        let sample = match self.cur_channel {
            0 => self.left_sample,
            _ => self.right_sample,
        };
        self.cur_channel += 1;
        Some(sample as i16)
    }
}
impl<R: std::io::Read> Decoder for AdpcmDecoder<R> {
    /// 2 for stereo streams, 1 for mono.
    #[inline]
    fn num_channels(&self) -> u8 {
        match self.is_stereo {
            true => 2,
            false => 1,
        }
    }

    /// Sample rate as given by the SWF sound header.
    #[inline]
    fn sample_rate(&self) -> u16 {
        self.sample_rate
    }
}

View File

@ -0,0 +1,124 @@
/// MP3 decoder backed by the native `minimp3` library.
#[cfg(feature = "minimp3")]
#[allow(dead_code)]
pub struct Mp3Decoder<R: std::io::Read> {
    decoder: minimp3::Decoder<R>,
    sample_rate: u32,
    num_channels: u16,
    /// Most recently decoded frame; samples are interleaved i16.
    cur_frame: minimp3::Frame,
    /// Index of the next sample to emit from `cur_frame.data`.
    cur_sample: usize,
    /// Number of valid samples in `cur_frame.data` (0 = end of stream).
    num_samples: usize,
}

#[cfg(feature = "minimp3")]
impl<R: std::io::Read> Mp3Decoder<R> {
    pub fn new(num_channels: u16, sample_rate: u32, reader: R) -> Self {
        Mp3Decoder {
            decoder: minimp3::Decoder::new(reader),
            num_channels,
            sample_rate,
            // Start from an explicit empty placeholder frame. The previous
            // `std::mem::zeroed::<minimp3::Frame>()` was undefined behavior:
            // a zeroed `Vec` violates its non-null pointer invariant.
            cur_frame: minimp3::Frame {
                data: vec![],
                sample_rate: sample_rate as _,
                channels: num_channels as _,
                layer: 3,
                bitrate: 160,
            },
            cur_sample: 0,
            num_samples: 0,
        }
    }

    /// Decodes the next MP3 frame, or marks end-of-stream on error/EOF.
    fn next_frame(&mut self) {
        if let Ok(frame) = self.decoder.next_frame() {
            self.num_samples = frame.data.len();
            self.cur_frame = frame;
        } else {
            // Decode error or EOF: signal exhaustion to the iterator.
            self.num_samples = 0;
        }
        self.cur_sample = 0;
    }
}

#[cfg(feature = "minimp3")]
impl<R: std::io::Read> Iterator for Mp3Decoder<R> {
    type Item = i16;

    /// Yields interleaved samples, refilling from the decoder as needed;
    /// returns `None` once the decoder stops producing frames.
    #[inline]
    fn next(&mut self) -> Option<i16> {
        if self.cur_sample >= self.num_samples {
            self.next_frame();
        }
        if self.num_samples > 0 {
            let sample = self.cur_frame.data[self.cur_sample];
            self.cur_sample += 1;
            Some(sample)
        } else {
            None
        }
    }
}
// MP3 decoder backed by the pure-Rust `puremp3` crate (compiled in when the
// native `minimp3` feature is disabled, e.g. for the wasm build).
#[cfg(all(feature = "puremp3", not(feature = "minimp3")))]
pub struct Mp3Decoder<R: std::io::Read> {
decoder: puremp3::Mp3Decoder<R>,
sample_rate: u32,
num_channels: u16,
// Most recently decoded frame; samples are per-channel f32 planes.
cur_frame: puremp3::Frame,
// Next sample index within the current frame.
cur_sample: usize,
// Next channel to emit (output is interleaved).
cur_channel: usize,
}
#[cfg(all(feature = "puremp3", not(feature = "minimp3")))]
impl<R: std::io::Read> Mp3Decoder<R> {
pub fn new(num_channels: u16, sample_rate: u32, reader: R) -> Self {
Mp3Decoder {
decoder: puremp3::Mp3Decoder::new(reader),
num_channels,
sample_rate,
// NOTE(review): `mem::zeroed` on a non-trivial struct is undefined
// behavior if `puremp3::Frame` contains any field with an invalid
// all-zero bit pattern — consider constructing a real empty frame
// or wrapping `cur_frame` in `Option` instead.
cur_frame: unsafe { std::mem::zeroed::<puremp3::Frame>() },
cur_sample: 0,
cur_channel: 0,
}
}
// Decodes the next frame, or marks end-of-stream by zeroing `num_samples`.
fn next_frame(&mut self) {
if let Ok(frame) = self.decoder.next_frame() {
self.cur_frame = frame;
} else {
// Decode error or EOF: the iterator will then yield `None`.
self.cur_frame.num_samples = 0;
}
self.cur_sample = 0;
self.cur_channel = 0;
}
}
// `Decoder` impl shared by whichever `Mp3Decoder` variant is compiled in.
impl<R: std::io::Read> super::Decoder for Mp3Decoder<R> {
#[inline]
fn num_channels(&self) -> u8 {
self.num_channels as u8
}
#[inline]
fn sample_rate(&self) -> u16 {
// Narrowing cast — assumes the rate fits in u16; TODO confirm.
self.sample_rate as u16
}
}
#[cfg(all(feature = "puremp3", not(feature = "minimp3")))]
impl<R: std::io::Read> Iterator for Mp3Decoder<R> {
type Item = i16;
// Yields interleaved 16-bit samples converted from the frame's f32 planes.
#[inline]
fn next(&mut self) -> Option<i16> {
// Refill when the current frame is exhausted (also covers the initial
// placeholder frame, which reports 0 samples).
if self.cur_sample >= self.cur_frame.num_samples {
self.next_frame();
}
if self.cur_frame.num_samples > 0 {
// Interleave: advance channel first, then the sample index once all
// channels of this sample have been emitted.
let sample = self.cur_frame.samples[self.cur_channel][self.cur_sample];
self.cur_channel += 1;
if self.cur_channel >= usize::from(self.num_channels) {
self.cur_channel = 0;
self.cur_sample += 1;
}
// Convert normalized f32 to signed 16-bit PCM.
Some((sample * 32767.0) as i16)
} else {
None
}
}
}

View File

@ -15,7 +15,7 @@ mod movie_clip;
mod player;
mod prelude;
pub mod shape_utils;
mod tag_utils;
pub mod tag_utils;
mod text;
mod transform;
@ -23,3 +23,4 @@ pub mod backend;
pub use player::Player;
pub use swf::Color;
pub use swf;

View File

@ -19,17 +19,17 @@ type FrameNumber = u16;
#[derive(Clone)]
pub struct MovieClip<'gc> {
base: DisplayObjectBase,
id: CharacterId,
tag_stream_start: u64,
tag_stream_pos: u64,
tag_stream_len: usize,
is_playing: bool,
action: Option<(usize, usize)>,
goto_queue: Vec<FrameNumber>,
current_frame: FrameNumber,
total_frames: FrameNumber,
audio_stream_info: Option<swf::SoundStreamHead>,
audio_stream: Option<AudioStreamHandle>,
stream_started: bool,
children: BTreeMap<Depth, DisplayNode<'gc>>,
}
@ -38,32 +38,37 @@ impl<'gc> MovieClip<'gc> {
pub fn new() -> Self {
Self {
base: Default::default(),
id: 0,
tag_stream_start: 0,
tag_stream_pos: 0,
tag_stream_len: 0,
is_playing: true,
action: None,
goto_queue: Vec::new(),
current_frame: 0,
total_frames: 1,
audio_stream: None,
stream_started: false,
audio_stream_info: None,
children: BTreeMap::new(),
}
}
pub fn new_with_data(tag_stream_start: u64, tag_stream_len: usize, num_frames: u16) -> Self {
pub fn new_with_data(
id: CharacterId,
tag_stream_start: u64,
tag_stream_len: usize,
num_frames: u16,
) -> Self {
Self {
base: Default::default(),
id,
tag_stream_start,
tag_stream_pos: 0,
tag_stream_len,
is_playing: true,
action: None,
goto_queue: Vec::new(),
current_frame: 0,
audio_stream: None,
stream_started: false,
audio_stream_info: None,
total_frames: num_frames,
children: BTreeMap::new(),
}
@ -161,10 +166,6 @@ impl<'gc> MovieClip<'gc> {
None
}
pub fn action(&self) -> Option<(usize, usize)> {
self.action
}
pub fn run_goto_queue(&mut self, context: &mut UpdateContext<'_, 'gc, '_>) {
let mut i = 0;
while i < self.goto_queue.len() {
@ -235,8 +236,8 @@ impl<'gc> MovieClip<'gc> {
};
let _ = tag_utils::decode_tags(&mut reader, tag_callback, TagCode::ShowFrame);
} else {
let tag_callback = |reader: &mut _, tag_code, _tag_len| match tag_code {
TagCode::DoAction => self.do_action(context, reader),
let tag_callback = |reader: &mut _, tag_code, tag_len| match tag_code {
TagCode::DoAction => self.do_action(context, reader, tag_len),
TagCode::PlaceObject => self.place_object(context, reader, 1),
TagCode::PlaceObject2 => self.place_object(context, reader, 2),
TagCode::PlaceObject3 => self.place_object(context, reader, 3),
@ -246,8 +247,6 @@ impl<'gc> MovieClip<'gc> {
TagCode::SetBackgroundColor => self.set_background_color(context, reader),
TagCode::StartSound => self.start_sound_1(context, reader),
TagCode::SoundStreamBlock => self.sound_stream_block(context, reader),
TagCode::SoundStreamHead => self.sound_stream_head(context, reader, 1),
TagCode::SoundStreamHead2 => self.sound_stream_head(context, reader, 2),
_ => Ok(()),
};
let _ = tag_utils::decode_tags(&mut reader, tag_callback, TagCode::ShowFrame);
@ -284,7 +283,7 @@ impl<'gc> DisplayObject<'gc> for MovieClip<'gc> {
TagCode::DefineShape2 => self.define_shape(context, reader, 2),
TagCode::DefineShape3 => self.define_shape(context, reader, 3),
TagCode::DefineShape4 => self.define_shape(context, reader, 4),
TagCode::DefineSound => self.define_sound(context, reader),
TagCode::DefineSound => self.define_sound(context, reader, tag_len),
TagCode::DefineSprite => self.define_sprite(context, reader, tag_len),
TagCode::DefineText => self.define_text(context, reader),
TagCode::JpegTables => self.jpeg_tables(context, reader, tag_len),
@ -294,14 +293,18 @@ impl<'gc> DisplayObject<'gc> for MovieClip<'gc> {
TagCode::PlaceObject4 => self.preload_place_object(context, reader, &mut ids, 4),
TagCode::RemoveObject => self.preload_remove_object(context, reader, &mut ids, 1),
TagCode::RemoveObject2 => self.preload_remove_object(context, reader, &mut ids, 2),
TagCode::SoundStreamHead => self.preload_sound_stream_head(context, reader, 1),
TagCode::SoundStreamHead2 => self.preload_sound_stream_head(context, reader, 2),
TagCode::SoundStreamBlock => self.preload_sound_stream_block(context, reader, tag_len),
_ => Ok(()),
};
let _ = tag_utils::decode_tags(&mut reader, tag_callback, TagCode::End);
if self.audio_stream_info.is_some() {
context.audio.preload_sound_stream_end(self.id);
}
}
fn run_frame(&mut self, context: &mut UpdateContext<'_, 'gc, '_>) {
self.action = None;
if self.is_playing {
self.run_frame_internal(context, false);
}
@ -460,6 +463,39 @@ impl<'gc, 'a> MovieClip<'gc> {
Ok(())
}
/// Forwards a `SoundStreamBlock` tag's payload to the audio backend during
/// the preload pass, so streaming audio can be decoded ahead of playback.
/// No-op when this clip has not declared a `SoundStreamHead`.
#[inline]
fn preload_sound_stream_block(
&mut self,
context: &mut UpdateContext<'_, 'gc, '_>,
reader: &mut SwfStream<&'a [u8]>,
tag_len: usize,
) -> DecodeResult {
if self.audio_stream_info.is_some() {
// Borrow the tag's bytes directly out of the underlying SWF buffer
// instead of copying them through the reader.
let pos = reader.get_ref().position() as usize;
let data = reader.get_ref().get_ref();
let data = &data[pos..pos + tag_len];
context.audio.preload_sound_stream_block(self.id, data);
}
Ok(())
}
/// Reads a `SoundStreamHead(2)` tag during preload, registers it with the
/// audio backend, and stores it so subsequent stream blocks are forwarded.
#[inline]
fn preload_sound_stream_head(
&mut self,
context: &mut UpdateContext<'_, 'gc, '_>,
reader: &mut SwfStream<&'a [u8]>,
_version: u8,
) -> DecodeResult {
let audio_stream_info = reader.read_sound_stream_head()?;
context
.audio
.preload_sound_stream_head(self.id, &audio_stream_info);
self.audio_stream_info = Some(audio_stream_info);
Ok(())
}
#[inline]
fn define_bits(
&mut self,
@ -610,8 +646,12 @@ impl<'gc, 'a> MovieClip<'gc> {
&mut self,
context: &mut UpdateContext<'_, 'gc, '_>,
reader: &mut SwfStream<&'a [u8]>,
tag_len: usize,
) -> DecodeResult {
// TODO(Herschel): Can we use a slice of the sound data instead of copying the data?
use std::io::Read;
let mut reader =
swf::read::Reader::new(reader.get_mut().take(tag_len as u64), context.swf_version);
let sound = reader.read_define_sound()?;
let handle = context.audio.register_sound(&sound).unwrap();
context
@ -629,7 +669,7 @@ impl<'gc, 'a> MovieClip<'gc> {
let id = reader.read_character_id()?;
let num_frames = reader.read_u16()?;
let mut movie_clip =
MovieClip::new_with_data(reader.get_ref().position(), tag_len - 4, num_frames);
MovieClip::new_with_data(id, reader.get_ref().position(), tag_len - 4, num_frames);
movie_clip.preload(context);
@ -695,10 +735,17 @@ impl<'gc, 'a> MovieClip<'gc> {
#[inline]
fn do_action(
&mut self,
_context: &mut UpdateContext<'_, 'gc, '_>,
_reader: &mut SwfStream<&'a [u8]>,
context: &mut UpdateContext<'_, 'gc, '_>,
reader: &mut SwfStream<&'a [u8]>,
tag_len: usize,
) -> DecodeResult {
// TODO
// Queue the actions.
let slice = crate::tag_utils::SwfSlice {
data: std::sync::Arc::clone(context.swf_data),
start: reader.get_ref().position() as usize,
end: reader.get_ref().position() as usize + tag_len,
};
context.actions.push(slice);
Ok(())
}
@ -822,21 +869,19 @@ impl<'gc, 'a> MovieClip<'gc> {
#[inline]
fn sound_stream_block(
&mut self,
_context: &mut UpdateContext<'_, 'gc, '_>,
context: &mut UpdateContext<'_, 'gc, '_>,
_reader: &mut SwfStream<&'a [u8]>,
) -> DecodeResult {
// TODO
Ok(())
if let (Some(stream_info), None) = (&self.audio_stream_info, &self.audio_stream) {
let slice = crate::tag_utils::SwfSlice {
data: std::sync::Arc::clone(context.swf_data),
start: self.tag_stream_start as usize,
end: self.tag_stream_start as usize + self.tag_stream_len,
};
let audio_stream = context.audio.start_stream(self.id, slice, stream_info);
self.audio_stream = Some(audio_stream);
}
#[inline]
fn sound_stream_head(
&mut self,
_context: &mut UpdateContext<'_, 'gc, '_>,
_reader: &mut SwfStream<&'a [u8]>,
_version: u8,
) -> DecodeResult {
// TODO
Ok(())
}

View File

@ -8,6 +8,7 @@ use crate::prelude::*;
use crate::transform::TransformStack;
use gc_arena::{make_arena, ArenaParameters, Collect, GcCell, MutationContext};
use log::info;
use std::sync::Arc;
#[derive(Collect)]
#[collect(empty_drop)]
@ -19,7 +20,7 @@ struct GcRoot<'gc> {
make_arena!(GcArena, GcRoot);
pub struct Player {
swf_data: Vec<u8>,
swf_data: Arc<Vec<u8>>,
swf_version: u8,
avm: Avm1,
@ -59,7 +60,7 @@ impl Player {
renderer.set_dimensions(movie_width, movie_height);
let mut player = Player {
swf_data: data,
swf_data: Arc::new(data),
swf_version: header.version,
avm: Avm1::new(header.version),
@ -76,7 +77,10 @@ impl Player {
gc_arena: GcArena::new(ArenaParameters::default(), |gc_context| GcRoot {
library: GcCell::allocate(gc_context, Library::new()),
root: GcCell::allocate(gc_context, MovieClip::new_with_data(0, swf_len, header.num_frames)),
root: GcCell::allocate(
gc_context,
MovieClip::new_with_data(0, 0, swf_len, header.num_frames),
),
}),
frame_rate: header.frame_rate.into(),
@ -95,6 +99,12 @@ impl Player {
}
pub fn tick(&mut self, dt: f64) {
// Don't run until preloading is complete.
// TODO: Eventually we want to stream content similar to the Flash player.
if !self.audio.is_loading_complete() {
return;
}
self.frame_accumulator += dt;
self.global_time += dt as u64;
let frame_time = 1000.0 / self.frame_rate;
@ -151,7 +161,7 @@ impl Player {
avm,
renderer,
audio,
action: None,
actions: vec![],
gc_context,
};
@ -182,7 +192,7 @@ impl Player {
avm,
renderer,
audio,
action: None,
actions: vec![],
gc_context,
};
@ -218,7 +228,7 @@ impl Player {
pub struct UpdateContext<'a, 'gc, 'gc_context> {
pub swf_version: u8,
pub swf_data: &'a [u8],
pub swf_data: &'a Arc<Vec<u8>>,
pub global_time: u64,
pub mouse_pos: (f32, f32),
pub library: std::cell::RefMut<'a, Library<'gc>>,
@ -227,7 +237,7 @@ pub struct UpdateContext<'a, 'gc, 'gc_context> {
pub avm: &'a mut Avm1,
pub renderer: &'a mut RenderBackend,
pub audio: &'a mut Audio,
pub action: Option<(usize, usize)>,
pub actions: Vec<crate::tag_utils::SwfSlice>,
}
pub struct RenderContext<'a, 'gc> {

View File

@ -3,6 +3,18 @@ use swf::TagCode;
pub type DecodeResult = Result<(), Box<std::error::Error>>;
pub type SwfStream<R> = swf::read::Reader<std::io::Cursor<R>>;
/// A shared-ownership slice into the raw SWF data.
///
/// Holds an `Arc` to the full SWF byte buffer plus `start`/`end` byte
/// offsets, so tag data can be handed to other components (e.g. the audio
/// backend) without copying. Cloning is cheap — it only bumps the `Arc`
/// reference count.
#[derive(Clone)]
pub struct SwfSlice {
    pub data: std::sync::Arc<Vec<u8>>,
    pub start: usize,
    pub end: usize,
}

impl AsRef<[u8]> for SwfSlice {
    /// Returns the selected byte range.
    ///
    /// # Panics
    /// Panics if `start..end` is out of bounds for `data`.
    fn as_ref(&self) -> &[u8] {
        &self.data[self.start..self.end]
    }
}
pub fn decode_tags<'a, R, F>(
reader: &'a mut SwfStream<R>,
mut tag_callback: F,

View File

@ -15,8 +15,6 @@ inflate = "0.4.5"
jpeg-decoder = "0.1.15"
log = "0.4"
lyon = "0.13.3"
minimp3 = { git = "https://github.com/germangb/minimp3-rs" }
structopt = "0.2.15"
winit = "0.19.1"

View File

@ -1,26 +1,27 @@
use generational_arena::Arena;
use rodio::{source::Source, Sink};
use ruffle_core::backend::audio::decoders::{stream_tag_reader, AdpcmDecoder, Decoder, Mp3Decoder};
use ruffle_core::backend::audio::{swf, AudioBackend, AudioStreamHandle, SoundHandle};
use std::io::Cursor;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
pub struct RodioAudioBackend {
sounds: Arena<Sound>,
active_sounds: Arena<Sink>,
active_sounds: Arena<rodio::Sink>,
streams: Arena<AudioStream>,
device: rodio::Device,
}
#[allow(dead_code)]
struct AudioStream {
clip_id: swf::CharacterId,
info: swf::SoundStreamHead,
sink: rodio::Sink,
data: Arc<Mutex<Cursor<Vec<u8>>>>,
}
#[allow(dead_code)]
struct Sound {
format: swf::SoundFormat,
data: Vec<u8>,
data: Arc<Vec<u8>>,
}
impl RodioAudioBackend {
@ -41,28 +42,32 @@ impl AudioBackend for RodioAudioBackend {
) -> Result<SoundHandle, Box<std::error::Error>> {
let sound = Sound {
format: swf_sound.format.clone(),
data: swf_sound.data.clone(),
data: Arc::new(swf_sound.data.clone()),
};
Ok(self.sounds.insert(sound))
}
fn register_stream(&mut self, stream_info: &swf::SoundStreamHead) -> AudioStreamHandle {
let sink = Sink::new(&self.device);
let data = Arc::new(Mutex::new(Cursor::new(vec![])));
fn start_stream(
&mut self,
clip_id: swf::CharacterId,
clip_data: ruffle_core::tag_utils::SwfSlice,
stream_info: &swf::SoundStreamHead,
) -> AudioStreamHandle {
let sink = rodio::Sink::new(&self.device);
let format = &stream_info.stream_format;
let decoder = Mp3Decoder::new(
if format.is_stereo { 2 } else { 1 },
format.sample_rate.into(),
ThreadRead(Arc::clone(&data)),
)
.unwrap();
stream_tag_reader(clip_data),
);
let stream = AudioStream {
clip_id,
info: stream_info.clone(),
sink,
data,
};
stream.sink.append(decoder);
stream.sink.append(DecoderSource(Box::new(decoder)));
self.streams.insert(stream)
}
@ -84,211 +89,68 @@ impl AudioBackend for RodioAudioBackend {
sound.format.sample_rate.into(),
data,
);
let sink = Sink::new(&self.device);
let sink = rodio::Sink::new(&self.device);
sink.append(buffer);
self.active_sounds.insert(sink);
}
AudioCompression::Adpcm => {
let decoder = AdpcmDecoder::new(
Cursor::new(sound.data.to_vec()),
sound.format.is_stereo,
sound.format.sample_rate,
)
.unwrap();
let sink = rodio::Sink::new(&self.device);
sink.append(DecoderSource(Box::new(decoder)));
self.active_sounds.insert(sink);
}
AudioCompression::Mp3 => {
let decoder = Mp3EventDecoder::new(Cursor::new(sound.data.clone())).unwrap();
let sink = Sink::new(&self.device);
sink.append(decoder);
let decoder = Mp3Decoder::new(
if sound.format.is_stereo { 2 } else { 1 },
sound.format.sample_rate.into(),
Cursor::new(sound.data.to_vec()),
);
let sink = rodio::Sink::new(&self.device);
sink.append(DecoderSource(Box::new(decoder)));
self.active_sounds.insert(sink);
}
_ => unimplemented!(),
}
}
fn queue_stream_samples(&mut self, handle: AudioStreamHandle, mut samples: &[u8]) {
if let Some(stream) = self.streams.get_mut(handle) {
let _tag_samples = u16::from(samples[0]) | (u16::from(samples[1]) << 8);
samples = &samples[4..];
let mut buffer = stream.data.lock().unwrap();
buffer.get_mut().extend_from_slice(&samples);
}
}
fn tick(&mut self) {
self.active_sounds.retain(|_, sink| !sink.empty());
}
}
use std::io::{self, Read, Seek};
use std::time::Duration;
struct DecoderSource(Box<Decoder + Send>);
use minimp3::{Decoder, Frame};
/// A reader over a shared, growable byte buffer.
///
/// A producer thread appends encoded bytes to the inner `Cursor`'s vector
/// while the audio thread reads from the front. Consumed bytes are dropped
/// after every read so the buffer does not grow without bound.
pub struct ThreadRead(Arc<Mutex<Cursor<Vec<u8>>>>);

impl Read for ThreadRead {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut buffer = self.0.lock().unwrap();
        let result = buffer.read(buf);
        // Drop the bytes we just consumed from the front of the buffer and
        // rewind, so writers always append and reads always start at 0.
        // (Replaces a resize-then-reassign dance that made a redundant
        // allocation and a pointless `resize`.)
        let consumed = buffer.position() as usize;
        buffer.get_mut().drain(..consumed);
        buffer.set_position(0);
        result
    }
}

impl Seek for ThreadRead {
    /// Delegates to the inner cursor. Note: positions are relative to the
    /// current (already-drained) buffer contents, not the whole stream.
    fn seek(&mut self, pos: std::io::SeekFrom) -> io::Result<u64> {
        self.0.lock().unwrap().seek(pos)
    }
}
/// Streaming MP3 decoder over a `ThreadRead` shared buffer, exposed to
/// rodio as a `Source` (impl elsewhere in this file).
pub struct Mp3Decoder {
decoder: Decoder<ThreadRead>,
sample_rate: u32,
num_channels: u16,
// Most recently decoded frame; samples are interleaved i16.
current_frame: Frame,
// Next sample index within `current_frame.data`.
current_frame_offset: usize,
// Set once enough data has been buffered to begin decoding.
playing: bool,
}
impl Mp3Decoder {
/// Creates a decoder. Never actually fails despite the `Result` return.
pub fn new(num_channels: u16, sample_rate: u32, data: ThreadRead) -> Result<Self, ()> {
let decoder = Decoder::new(data);
// Start from an empty placeholder frame; real frames replace it as
// data is buffered.
let current_frame = Frame {
data: vec![],
sample_rate: sample_rate as _,
channels: num_channels as _,
layer: 3,
bitrate: 160,
};
Ok(Mp3Decoder {
decoder,
num_channels,
sample_rate,
current_frame,
current_frame_offset: 0,
playing: false,
})
}
}
impl Source for Mp3Decoder {
#[inline]
fn current_frame_len(&self) -> Option<usize> {
None //Some(self.current_frame.data.len())
}
#[inline]
fn channels(&self) -> u16 {
self.num_channels
}
#[inline]
fn sample_rate(&self) -> u32 {
self.sample_rate
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
None
}
}
impl Iterator for Mp3Decoder {
impl Iterator for DecoderSource {
type Item = i16;
#[inline]
fn next(&mut self) -> Option<i16> {
if !self.playing {
let buffer = self.decoder.reader().0.lock().unwrap();
if buffer.get_ref().len() < 44100 / 60 {
return Some(0);
}
self.playing = true;
}
if self.current_frame_offset == self.current_frame.data.len() {
match self.decoder.next_frame() {
Ok(frame) => self.current_frame = frame,
_ => return Some(0),
}
self.current_frame_offset = 0;
}
let v = self.current_frame.data[self.current_frame_offset];
self.current_frame_offset += 1;
Some(v)
self.0.next()
}
}
/// One-shot MP3 decoder for event sounds, where the complete compressed
/// data is available up front in a `Read + Seek` source.
pub struct Mp3EventDecoder<R>
where
    R: Read + Seek,
{
    decoder: Decoder<R>,
    // Most recently decoded frame and the read offset into its samples.
    current_frame: Frame,
    current_frame_offset: usize,
}
impl<R> Mp3EventDecoder<R>
where
    R: Read + Seek,
{
    /// Creates a decoder over complete MP3 data.
    ///
    /// Eagerly decodes the first frame so that channel count and sample
    /// rate are known immediately; returns `Err(())` if the data contains
    /// no decodable frame.
    pub fn new(data: R) -> Result<Self, ()> {
        let mut decoder = Decoder::new(data);
        let current_frame = decoder.next_frame().map_err(|_| ())?;
        Ok(Mp3EventDecoder {
            decoder,
            current_frame,
            current_frame_offset: 0,
        })
    }
}
impl<R> Source for Mp3EventDecoder<R>
where
R: Read + Seek,
{
impl rodio::Source for DecoderSource {
#[inline]
fn current_frame_len(&self) -> Option<usize> {
Some(self.current_frame.data.len())
None
}
#[inline]
fn channels(&self) -> u16 {
self.current_frame.channels as _
self.0.num_channels().into()
}
#[inline]
fn sample_rate(&self) -> u32 {
self.current_frame.sample_rate as _
self.0.sample_rate().into()
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
fn total_duration(&self) -> Option<std::time::Duration> {
None
}
}
impl<R> Iterator for Mp3EventDecoder<R>
where
    R: Read + Seek,
{
    type Item = i16;

    /// Yields the next interleaved PCM sample, decoding a new MP3 frame
    /// whenever the current one is exhausted. Returns `None` at end of
    /// data or on a decode error.
    #[inline]
    fn next(&mut self) -> Option<i16> {
        if self.current_frame_offset == self.current_frame.data.len() {
            match self.decoder.next_frame() {
                Ok(frame) => {
                    self.current_frame = frame;
                    // Rewind into the new frame only on success. Resetting
                    // the offset before a failed decode (as the previous
                    // code did) left the iterator replaying the stale frame
                    // forever after having returned `None` once.
                    self.current_frame_offset = 0;
                }
                _ => return None,
            }
        }
        let v = self.current_frame.data[self.current_frame_offset];
        self.current_frame_offset += 1;
        Some(v)
    }
}

View File

@ -15,21 +15,25 @@ base64 = "0.10.1"
byteorder = "1.3.1"
console_error_panic_hook = { version = "0.1.1", optional = true }
console_log = { version = "0.1", optional = true }
ruffle_core = { path = "../core" }
fnv = "1.0.3"
generational-arena = "0.2.2"
inflate = "0.4.5"
jpeg-decoder = "0.1.15"
js-sys = "0.3.19"
js-sys = "0.3.25"
log = "0.4"
png = "0.14.1"
svg = "0.5.12"
url = "1.7.2"
wasm-bindgen = "0.2.44"
wasm-bindgen = "0.2.48"
[dependencies.ruffle_core]
path = "../core"
default-features = false
features = ["puremp3"]
[dependencies.web-sys]
version = "0.3.19"
features = ["AudioBuffer", "AudioBufferSourceNode", "AudioContext", "AudioDestinationNode", "AudioNode", "CanvasRenderingContext2d", "CssStyleDeclaration", "Document", "Element", "HtmlCanvasElement", "HtmlElement", "HtmlImageElement", "Node", "Performance", "Window"]
version = "0.3.25"
features = ["AudioBuffer", "AudioBufferSourceNode", "AudioProcessingEvent", "AudioContext", "AudioDestinationNode", "AudioNode", "CanvasRenderingContext2d", "CssStyleDeclaration", "Document", "Element", "HtmlCanvasElement", "HtmlElement", "HtmlImageElement", "Node", "Performance", "ScriptProcessorNode", "Window"]
[dev-dependencies]
wasm-bindgen-test = "0.2.44"
wasm-bindgen-test = "0.2.48"

View File

@ -4,13 +4,21 @@ const WasmPackPlugin = require("@wasm-tool/wasm-pack-plugin");
const webpack = require('webpack');
const path = require('path');
module.exports = {
module.exports = (env, argv) => {
let mode = "development";
if (argv && argv.mode) {
mode = argv.mode;
}
console.log(`Building ${mode}...`);
return {
entry: path.resolve(__dirname, "www/bootstrap.js"),
output: {
path: path.resolve(__dirname, "dist"),
filename: "index.js",
},
mode: "development",
mode: mode,
plugins: [
new CleanWebpackPlugin(),
new CopyWebpackPlugin([{
@ -20,6 +28,8 @@ module.exports = {
new WasmPackPlugin({
crateDirectory: path.resolve(__dirname, ".."),
extraArgs: "--out-name=ruffle",
forceMode: mode,
})
]
}
};

View File

@ -1,28 +1,57 @@
use fnv::FnvHashMap;
use generational_arena::Arena;
use js_sys::Uint8Array;
use ruffle_core::backend::audio::{swf, AudioBackend, AudioStreamHandle, SoundHandle};
use ruffle_core::backend::audio::decoders::{AdpcmDecoder, Mp3Decoder};
use ruffle_core::backend::audio::{AudioBackend, AudioStreamHandle, SoundHandle};
use ruffle_core::backend::audio::swf::{self, AudioCompression};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use wasm_bindgen::{closure::Closure, JsCast};
use web_sys::AudioContext;
thread_local! {
//pub static SOUNDS: RefCell<Vec<>>>> = RefCell::new(vec![]);
}
pub struct WebAudioBackend {
context: AudioContext,
sounds: Arena<Sound>,
streams: Arena<AudioStream>,
stream_data: FnvHashMap<swf::CharacterId, StreamData>,
id_to_sound: FnvHashMap<swf::CharacterId, SoundHandle>,
left_samples: Vec<f32>,
right_samples: Vec<f32>,
}
thread_local! {
static STREAMS: RefCell<Arena<AudioStream>> = RefCell::new(Arena::new());
static NUM_SOUNDS_LOADING: Cell<u32> = Cell::new(0);
}
struct StreamData {
format: swf::SoundFormat,
audio_data: Vec<u8>,
num_sample_frames: u32,
samples_per_block: u32,
}
type AudioBufferPtr = Rc<RefCell<web_sys::AudioBuffer>>;
// A sound can be either a pre-decoded JS AudioBuffer or an on-the-fly decoded stream using a ScriptProcessorNode.
#[allow(dead_code)]
enum SoundSource {
// Pre-decoded audio buffer.
AudioBuffer(AudioBufferPtr),
// Decode the audio data on the fly from a byte stream.
Decoder(Vec<u8>),
}
struct Sound {
object: js_sys::Object,
format: swf::SoundFormat,
source: SoundSource,
}
struct AudioStream {
info: swf::SoundStreamHead,
compressed_data: Vec<u8>,
sample_data: [Vec<f32>; 2],
object: js_sys::Object,
type Decoder = Box<dyn Iterator<Item=i16>>;
#[allow(dead_code)]
enum AudioStream {
Decoder { decoder: Decoder, is_stereo: bool, },// closure: Option<Closure<Box<FnMut(web_sys::AudioProcessingEvent)>>> } ,
AudioBuffer { node: web_sys::AudioBufferSourceNode },
}
type Error = Box<std::error::Error>;
@ -33,274 +62,368 @@ impl WebAudioBackend {
Ok(Self {
context,
sounds: Arena::new(),
streams: Arena::new(),
stream_data: FnvHashMap::default(),
id_to_sound: FnvHashMap::default(),
left_samples: vec![],
right_samples: vec![],
})
}
/// Begins playback of a previously registered sound and returns a handle
/// to the playing stream.
///
/// Pre-decoded sounds play through a one-shot `AudioBufferSourceNode`;
/// compressed sounds are decoded incrementally inside a
/// `ScriptProcessorNode`'s `onaudioprocess` callback.
///
/// NOTE(review): the return type is `SoundHandle`, but the value returned
/// is an index into the `STREAMS` arena — presumably both aliases share
/// the same generational index type; confirm against the handle
/// definitions. Also, the ScriptProcessorNode is never connected to
/// `context.destination()` here — confirm the connection happens
/// elsewhere or whether decoded streams are audible.
fn play_sound_internal(&mut self, handle: SoundHandle) -> SoundHandle {
    let sound = self.sounds.get(handle).unwrap();
    match &sound.source {
        SoundSource::AudioBuffer(audio_buffer) => {
            // Simple case: wire the already-decoded buffer to the output.
            let audio_buffer = audio_buffer.borrow();
            let node = self.context.create_buffer_source().unwrap();
            node.set_buffer(Some(&*audio_buffer));
            node
                .connect_with_audio_node(&self.context.destination())
                .unwrap();
            node.start().unwrap();
            let audio_stream = AudioStream::AudioBuffer {
                node
            };
            // Track the playing stream in the thread-local arena.
            STREAMS.with(|streams| {
                let mut streams = streams.borrow_mut();
                streams.insert(audio_stream)
            })
        }
        SoundSource::Decoder(audio_data) => {
            // Pick a decoder matching the sound's compression format.
            let decoder: Decoder = match sound.format.compression {
                AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
                    std::io::Cursor::new(audio_data.to_vec()),
                    sound.format.is_stereo,
                    sound.format.sample_rate
                ).unwrap()),
                AudioCompression::Mp3 => Box::new(Mp3Decoder::new(
                    if sound.format.is_stereo {
                        2
                    } else {
                        1
                    },
                    sound.format.sample_rate.into(),
                    std::io::Cursor::new(audio_data.to_vec())//&sound.data[..]
                )),
                _ => unimplemented!()
            };
            // Resample on the fly if the sound's rate differs from the
            // output context's rate.
            let decoder: Decoder = if sound.format.sample_rate != self.context.sample_rate() as u16 {
                Box::new(resample(decoder, sound.format.sample_rate, self.context.sample_rate() as u16, sound.format.is_stereo))
            } else {
                decoder
            };
            let audio_stream = AudioStream::Decoder {
                decoder,
                is_stereo: sound.format.is_stereo,
                //closure: None,
            };
            STREAMS.with(|streams| {
                let mut streams = streams.borrow_mut();
                let stream_handle = streams.insert(audio_stream);
                // 0 input channels; output channel count matches the sound.
                let script_processor_node = self.context.create_script_processor_with_buffer_size_and_number_of_input_channels_and_number_of_output_channels(4096, 0, if sound.format.is_stereo { 2 } else { 1 }).unwrap();
                let script_node = script_processor_node.clone();
                // Feed decoded samples into the audio graph on each process
                // event; tear the stream down once the decoder is exhausted.
                let closure = Closure::wrap(Box::new(move |event| {
                    STREAMS.with(|streams| {
                        let mut streams = streams.borrow_mut();
                        let audio_stream = streams.get_mut(stream_handle).unwrap();
                        let complete = WebAudioBackend::update_script_processor(audio_stream, event);
                        if complete {
                            streams.remove(stream_handle);
                            script_node.disconnect().unwrap();
                        }
                    })
                }) as Box<FnMut(web_sys::AudioProcessingEvent)>);
                script_processor_node.set_onaudioprocess(Some(closure.as_ref().unchecked_ref()));
                // TODO: This will leak memory per playing sound. Remember and properly drop the closure.
                closure.forget();
                stream_handle
            })
        }
    }
}
/// Decompresses `audio_data` in the given format into a Web Audio
/// `AudioBuffer`. PCM and ADPCM are decoded synchronously into the
/// reusable `left_samples`/`right_samples` scratch buffers; MP3 is
/// delegated to the browser's asynchronous decoder.
fn decompress_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], num_sample_frames: u32) -> AudioBufferPtr {
    if format.compression == AudioCompression::Mp3 {
        // MP3 goes through the browser's async decodeAudioData path.
        return self.decompress_mp3_to_audio_buffer(format, audio_data, num_sample_frames);
    }

    // This sucks. Firefox doesn't like 5512Hz sample rate, so manually double up the samples.
    // 5512Hz should be relatively rare.
    let audio_buffer = if format.sample_rate > 5512 {
        self.context.create_buffer(
            if format.is_stereo { 2 } else { 1 },
            num_sample_frames,
            f32::from(format.sample_rate)
        ).unwrap()
    } else {
        self.context.create_buffer(
            if format.is_stereo { 2 } else { 1 },
            num_sample_frames * 2,
            11025.0
        ).unwrap()
    };

    match format.compression {
        AudioCompression::Uncompressed => {
            // TODO: Check for is_16_bit.
            // NOTE(review): this treats individual bytes as samples and
            // normalizes by 32767, which only makes sense if two bytes
            // were combined into an i16 — confirm 8-bit vs. 16-bit
            // uncompressed handling.
            self.left_samples = audio_data.iter().step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
            if format.is_stereo {
                self.right_samples = audio_data.iter().skip(1).step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
            }
        }
        AudioCompression::Adpcm => {
            // Decode ADPCM synchronously into normalized f32 samples.
            let mut decoder = AdpcmDecoder::new(audio_data,
                format.is_stereo,
                format.sample_rate
            ).unwrap();
            if format.is_stereo {
                while let (Some(l), Some(r)) = (decoder.next(), decoder.next()) {
                    self.left_samples.push(f32::from(l) / 32767.0);
                    self.right_samples.push(f32::from(r) / 32767.0);
                }
            } else {
                self.left_samples = decoder.map(|n| f32::from(n) / 32767.0).collect();
            }
        }
        _ => unimplemented!(),
    }

    // Double up samples for 5512Hz audio to satisfy Firefox.
    if format.sample_rate == 5512 {
        let mut samples = Vec::with_capacity(self.left_samples.len() * 2);
        for sample in &self.left_samples {
            samples.push(*sample);
            samples.push(*sample);
        }
        self.left_samples = samples;

        if format.is_stereo {
            let mut samples = Vec::with_capacity(self.right_samples.len() * 2);
            for sample in &self.right_samples {
                samples.push(*sample);
                samples.push(*sample);
            }
            self.right_samples = samples;
        }
    }

    // Upload the decoded channels into the JS-side buffer.
    audio_buffer.copy_to_channel(&mut self.left_samples, 0).unwrap();
    if format.is_stereo {
        audio_buffer.copy_to_channel(&mut self.right_samples, 1).unwrap();
    }

    Rc::new(RefCell::new(audio_buffer))
}
/// Decodes MP3 data using the browser's `decodeAudioData` API.
///
/// Returns a placeholder 1-sample buffer immediately; the shared pointer
/// is swapped to the real decoded buffer when the async decode completes.
/// `NUM_SOUNDS_LOADING` counts outstanding decodes so
/// `is_loading_complete` can wait for them.
fn decompress_mp3_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], _num_sample_frames: u32) -> AudioBufferPtr {
    // We use the Web decodeAudioData API to decode MP3 data.
    // TODO: Is it possible we finish loading before the MP3 is decoding?
    let audio_buffer = self.context.create_buffer(1, 1, self.context.sample_rate()).unwrap();
    let audio_buffer = Rc::new(RefCell::new(audio_buffer));

    // View the Rust slice as a JS typed array, then copy it out so the
    // browser owns the data during async decoding.
    let data_array = unsafe { js_sys::Uint8Array::view(&audio_data[..]) };
    let array_buffer = data_array.buffer().slice_with_end(
        data_array.byte_offset(),
        data_array.byte_offset() + data_array.byte_length(),
    );

    NUM_SOUNDS_LOADING.with(|n| n.set(n.get() + 1));

    let _num_channels = if format.is_stereo { 2 } else { 1 };
    let buffer_ptr = Rc::clone(&audio_buffer);
    // On success, replace the placeholder with the decoded buffer.
    let success_closure = Closure::wrap(Box::new(move |buffer: web_sys::AudioBuffer| {
        *buffer_ptr.borrow_mut() = buffer;
        NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
    })
        as Box<dyn FnMut(web_sys::AudioBuffer)>);
    // On failure, log and still decrement the loading counter so loading
    // can complete.
    let error_closure = Closure::wrap(Box::new(move || {
        log::info!("Error decoding MP3 audio");
        NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
    })
        as Box<dyn FnMut()>);
    self.context.decode_audio_data_with_success_callback_and_error_callback(
        &array_buffer,
        success_closure.as_ref().unchecked_ref(),
        error_closure.as_ref().unchecked_ref()
    ).unwrap();

    // TODO: This will leak memory (once per decompressed MP3).
    // Not a huge deal as there are probably not many MP3s in an SWF.
    success_closure.forget();
    error_closure.forget();

    audio_buffer
}
/// `onaudioprocess` handler body: pulls decoded samples from the stream's
/// decoder into the event's output buffer.
///
/// Returns `true` when the decoder is exhausted and the stream should be
/// torn down.
///
/// NOTE(review): two samples are pulled from the decoder per output frame
/// even for mono streams — this assumes mono decoders (or the resampler)
/// yield duplicated left/right pairs. Confirm against the decoder
/// implementations.
fn update_script_processor(
    audio_stream: &mut AudioStream,
    event: web_sys::AudioProcessingEvent,
) -> bool {
    let mut complete = false;
    let mut left_samples = vec![];
    let mut right_samples = vec![];
    if let AudioStream::Decoder { decoder, is_stereo, .. } = audio_stream {
        let output_buffer = event.output_buffer().unwrap();
        let num_frames = output_buffer.length() as usize;
        for _ in 0..num_frames {
            // Pull one (left, right) pair per output frame, normalizing
            // i16 samples to [-1, 1] floats.
            if let (Some(l), Some(r)) = (decoder.next(), decoder.next()) {
                left_samples.push(f32::from(l) / 32767.0);
                if *is_stereo {
                    right_samples.push(f32::from(r) / 32767.0);
                }
            } else {
                // Decoder ran dry: stop and signal completion.
                complete = true;
                break;
            }
        }
        output_buffer.copy_to_channel(&mut left_samples[..], 0).unwrap();
        if *is_stereo {
            output_buffer.copy_to_channel(&mut right_samples[..], 1).unwrap();
        }
    }
    complete
}
}
impl AudioBackend for WebAudioBackend {
fn register_sound(&mut self, swf_sound: &swf::Sound) -> Result<SoundHandle, Error> {
let object = js_sys::Object::new();
let sound = Sound {
object: object.clone(),
};
let value = wasm_bindgen::JsValue::from(object);
let handle = self.sounds.insert(sound);
// Firefox doesn't seem to support <11025Hz sample rates.
let (sample_multiplier, sample_rate) = if swf_sound.format.sample_rate < 11025 {
(2, 11025)
fn register_sound(&mut self, sound: &swf::Sound) -> Result<SoundHandle, Error> {
// Slice off latency seek for MP3 data.
let data = if sound.format.compression == AudioCompression::Mp3 {
&sound.data[2..]
} else {
(1, swf_sound.format.sample_rate)
&sound.data[..]
};
log::info!(
"Compression: {:?} SR: {} {} {}",
swf_sound.format.compression,
swf_sound.format.sample_rate,
sample_rate,
sample_multiplier
);
use byteorder::{LittleEndian, ReadBytesExt};
match swf_sound.format.compression {
swf::AudioCompression::Uncompressed => {
let num_channels: usize = if swf_sound.format.is_stereo { 2 } else { 1 };
let num_frames = swf_sound.data.len() * sample_multiplier / num_channels;
let audio_buffer = self
.context
.create_buffer(num_channels as u32, num_frames as u32, sample_rate.into())
.unwrap();
let mut out = Vec::with_capacity(num_channels);
for _ in 0..num_channels {
out.push(Vec::with_capacity(num_frames));
}
let mut data = &swf_sound.data[..];
while !data.is_empty() {
for channel in &mut out {
if sample_rate != swf_sound.format.sample_rate {
let sample = f32::from(data.read_i16::<LittleEndian>()?) / 32768.0;
for _ in 0..sample_multiplier {
channel.push(sample);
}
}
}
}
for (i, channel) in out.iter_mut().enumerate() {
audio_buffer
.copy_to_channel(&mut channel[..], i as i32)
.unwrap();
}
js_sys::Reflect::set(&value, &"buffer".into(), &audio_buffer).unwrap();
}
swf::AudioCompression::Adpcm => {
let num_channels: usize = if swf_sound.format.is_stereo { 2 } else { 1 };
let audio_buffer = self
.context
.create_buffer(
num_channels as u32,
swf_sound.num_samples * sample_multiplier as u32,
sample_rate.into(),
)
.unwrap();
let mut out = Vec::with_capacity(num_channels);
let data = &swf_sound.data[..];
let mut decoder = ruffle_core::backend::audio::AdpcmDecoder::new(
data,
swf_sound.format.is_stereo,
)?;
for _ in 0..num_channels {
out.push(Vec::with_capacity(swf_sound.num_samples as usize));
}
while let Ok((left, right)) = decoder.next_sample() {
for _ in 0..sample_multiplier {
out[0].push(f32::from(left) / 32768.0);
if swf_sound.format.is_stereo {
out[1].push(f32::from(right) / 32768.0);
}
}
}
for (i, channel) in out.iter_mut().enumerate() {
audio_buffer
.copy_to_channel(&mut channel[..], i as i32)
.unwrap();
}
js_sys::Reflect::set(&value, &"buffer".into(), &audio_buffer).unwrap();
}
swf::AudioCompression::Mp3 => {
let data_array = unsafe { Uint8Array::view(&swf_sound.data[2..]) };
let array_buffer = data_array.buffer().slice_with_end(
data_array.byte_offset(),
data_array.byte_offset() + data_array.byte_length(),
);
let closure = Closure::wrap(Box::new(move |buffer: wasm_bindgen::JsValue| {
js_sys::Reflect::set(&value, &"buffer".into(), &buffer).unwrap();
})
as Box<dyn FnMut(wasm_bindgen::JsValue)>);
self.context
.decode_audio_data(&array_buffer)
.unwrap()
.then(&closure);
closure.forget();
}
_ => unimplemented!(),
}
Ok(handle)
}
fn register_stream(&mut self, stream_info: &swf::SoundStreamHead) -> AudioStreamHandle {
let stream = AudioStream {
info: stream_info.clone(),
sample_data: [vec![], vec![]],
compressed_data: vec![],
object: js_sys::Object::new(),
let sound = Sound {
format: sound.format.clone(),
source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(&sound.format, data, sound.num_samples)),
};
self.streams.insert(stream)
Ok(self.sounds.insert(sound))
}
/// Registers the stream format for a movie clip's streaming sound.
///
/// Creates an empty accumulation entry the first time a clip's stream
/// head is seen; later heads for the same clip leave the existing entry
/// untouched.
fn preload_sound_stream_head(&mut self, clip_id: swf::CharacterId, stream_info: &swf::SoundStreamHead) {
    let new_stream_data = || StreamData {
        format: stream_info.stream_format.clone(),
        audio_data: vec![],
        num_sample_frames: 0,
        samples_per_block: stream_info.num_samples_per_block.into(),
    };
    self.stream_data.entry(clip_id).or_insert_with(new_stream_data);
}
/// Accumulates one SoundStreamBlock's audio data for a clip whose stream
/// head was previously registered via `preload_sound_stream_head`.
///
/// Also tallies the number of sample frames so the final audio buffer can
/// be sized correctly when the stream is finalized.
fn preload_sound_stream_block(&mut self, clip_id: swf::CharacterId, audio_data: &[u8]) {
    if let Some(stream) = self.stream_data.get_mut(&clip_id) {
        match stream.format.compression {
            AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
                // Raw PCM: frame size is channels * bytes-per-sample.
                let frame_len = (if stream.format.is_stereo { 2 } else { 1 })
                    * (if stream.format.is_16_bit { 2 } else { 1 });
                stream.num_sample_frames += (audio_data.len() as u32) / frame_len;
                stream.audio_data.extend_from_slice(audio_data);
            }
            AudioCompression::Mp3 => {
                // MP3 stream block header (SWF is little-endian):
                //   bytes 0-1: number of sample frames in this block (UI16)
                //   bytes 2-3: 'latency seek' (amount to skip when seeking to this frame)
                // Guard against empty/truncated blocks, which are legal in
                // SWF; the previous code indexed unconditionally (panic)
                // and read the wrong bytes in the wrong byte order.
                if audio_data.len() >= 4 {
                    let num_sample_frames =
                        u32::from(audio_data[0]) | (u32::from(audio_data[1]) << 8);
                    stream.num_sample_frames += num_sample_frames;
                    stream.audio_data.extend_from_slice(&audio_data[4..]);
                }
            }
            _ => {
                // TODO: This is a guess and will vary slightly from block to block!
                stream.num_sample_frames += stream.samples_per_block;
            }
        }
    }
}
/// Finalizes a clip's streaming sound: decompresses the accumulated
/// blocks into an audio buffer and registers it under the clip's ID.
/// Clips with no accumulated audio data are dropped silently.
fn preload_sound_stream_end(&mut self, clip_id: swf::CharacterId) {
    let stream = match self.stream_data.remove(&clip_id) {
        Some(stream) => stream,
        None => return,
    };
    if stream.audio_data.is_empty() {
        return;
    }
    let audio_buffer = self.decompress_to_audio_buffer(
        &stream.format,
        &stream.audio_data[..],
        stream.num_sample_frames,
    );
    let sound = Sound {
        format: stream.format,
        source: SoundSource::AudioBuffer(audio_buffer),
    };
    let handle = self.sounds.insert(sound);
    self.id_to_sound.insert(clip_id, handle);
}
fn play_sound(&mut self, sound: SoundHandle) {
if let Some(sound) = self.sounds.get(sound) {
let object = js_sys::Reflect::get(&sound.object, &"buffer".into()).unwrap();
if object.is_undefined() {
return;
}
let buffer: &web_sys::AudioBuffer = object.dyn_ref().unwrap();
let buffer_node = self.context.create_buffer_source().unwrap();
buffer_node.set_buffer(Some(buffer));
buffer_node
.connect_with_audio_node(&self.context.destination())
.unwrap();
buffer_node.start().unwrap();
}
self.play_sound_internal(sound);
}
fn queue_stream_samples(&mut self, _handle: AudioStreamHandle, _samples: &[u8]) {}
fn preload_stream_samples(&mut self, handle: AudioStreamHandle, samples: &[u8]) {
use swf::AudioCompression;
if let Some(stream) = self.streams.get_mut(handle) {
let format = &stream.info.stream_format;
let num_channels = if format.is_stereo { 2 } else { 1 };
let frame_size = num_channels * if format.is_16_bit { 2 } else { 1 };
let _num_frames = samples.len() / frame_size;
let mut i = 0;
match format.compression {
AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
if format.is_16_bit {
while i < samples.len() {
for c in 0..num_channels {
let sample = (u16::from(samples[i])
| (u16::from(samples[i + 1]) << 8))
as i16;
stream.sample_data[c].push((f32::from(sample)) / 32768.0);
i += 2;
}
}
} else {
while i < samples.len() {
for c in 0..num_channels {
stream.sample_data[c].push((f32::from(samples[i]) - 127.0) / 128.0);
i += 1;
}
}
}
}
AudioCompression::Mp3 => {
stream.compressed_data.extend_from_slice(&samples[4..]);
}
AudioCompression::Adpcm => {
let mut decoder =
ruffle_core::backend::audio::AdpcmDecoder::new(samples, format.is_stereo)
.unwrap();
while let Ok((left, right)) = decoder.next_sample() {
stream.sample_data[0].push(f32::from(left) / 32768.0);
if format.is_stereo {
stream.sample_data[1].push(f32::from(right) / 32768.0);
}
}
}
_ => (),
}
}
/// Starts playback of a movie clip's streaming sound that was previously
/// preloaded and registered under `clip_id`.
///
/// The clip data and stream info parameters are unused here because the
/// audio was already fully accumulated and decoded during preload.
///
/// NOTE(review): panics if `clip_id` has no registered sound — e.g. if
/// the stream contained no audio blocks, `preload_sound_stream_end` never
/// inserts into `id_to_sound`. Confirm callers guard against this.
fn start_stream(
    &mut self,
    clip_id: swf::CharacterId,
    _clip_data: ruffle_core::tag_utils::SwfSlice,
    _stream_info: &swf::SoundStreamHead,
) -> AudioStreamHandle {
    let handle = *self.id_to_sound.get(&clip_id).unwrap();
    self.play_sound_internal(handle)
}
/// Finalizes a preloaded stream: uploads the accumulated samples into an
/// `AudioBuffer` (PCM/ADPCM) or kicks off async browser decoding (MP3),
/// storing the result on the stream's JS object under "buffer".
fn preload_stream_finalize(&mut self, handle: AudioStreamHandle) {
    if let Some(stream) = self.streams.get_mut(handle) {
        let format = &stream.info.stream_format;
        let num_channels = if format.is_stereo { 2 } else { 1 };
        use swf::AudioCompression;
        match format.compression {
            AudioCompression::UncompressedUnknownEndian
            | AudioCompression::Uncompressed
            | AudioCompression::Adpcm => {
                if stream.sample_data[0].is_empty() {
                    return;
                }
                // `sample_data` already holds one decoded f32 sample per
                // channel per frame, so the frame count is simply the
                // per-channel sample count. (The previous code divided by
                // a byte-based frame size — a unit mismatch that sized the
                // buffer too small and truncated the sound.)
                let num_frames = stream.sample_data[0].len();
                let audio_buffer = self
                    .context
                    .create_buffer(
                        num_channels as u32,
                        num_frames as u32,
                        format.sample_rate.into(),
                    )
                    .unwrap();
                for i in 0..num_channels {
                    audio_buffer
                        .copy_to_channel(&mut stream.sample_data[i][..], i as i32)
                        .unwrap();
                }
                js_sys::Reflect::set(&stream.object, &"buffer".into(), &audio_buffer).unwrap();
            }
            AudioCompression::Mp3 => {
                if stream.compressed_data.is_empty() {
                    return;
                }
                // Hand the raw MP3 bytes to the browser for async decoding;
                // the decoded buffer is attached to the JS object when ready.
                let data_array = unsafe { Uint8Array::view(&stream.compressed_data[..]) };
                let array_buffer = data_array.buffer().slice_with_end(
                    data_array.byte_offset(),
                    data_array.byte_offset() + data_array.byte_length(),
                );
                let object = stream.object.clone();
                let closure = Closure::wrap(Box::new(move |buffer: wasm_bindgen::JsValue| {
                    js_sys::Reflect::set(&object, &"buffer".into(), &buffer).unwrap();
                })
                    as Box<dyn FnMut(wasm_bindgen::JsValue)>);
                self.context
                    .decode_audio_data(&array_buffer)
                    .unwrap()
                    .then(&closure);
                closure.forget();
            }
            _ => log::info!("Unsupported sound format"),
        }
    }
}
fn start_stream(&mut self, handle: AudioStreamHandle) -> bool {
if let Some(stream) = self.streams.get_mut(handle) {
let object = js_sys::Reflect::get(&stream.object, &"buffer".into()).unwrap();
if object.is_undefined() {
return false;
}
let buffer: &web_sys::AudioBuffer = object.dyn_ref().unwrap();
log::info!("Playing stream: {:?} {}", handle, buffer.length());
let buffer_node = self.context.create_buffer_source().unwrap();
buffer_node.set_buffer(Some(buffer));
buffer_node
.connect_with_audio_node(&self.context.destination())
.unwrap();
buffer_node.start().unwrap();
}
true
fn is_loading_complete(&self) -> bool {
NUM_SOUNDS_LOADING.with(|n| n.get() == 0)
}
}
// Janky resampling code.
// TODO: Clean this up.
/// Linearly interpolates an interleaved `i16` sample stream from
/// `input_sample_rate` to `output_sample_rate`.
///
/// The output is always interleaved stereo (left, right); mono input is
/// duplicated into both channels. The stream ends when the input can no
/// longer supply two consecutive frames to interpolate between, so empty
/// or single-frame input yields an empty stream (the previous version
/// panicked on empty input).
fn resample(mut input: impl Iterator<Item=i16>, input_sample_rate: u16, output_sample_rate: u16, is_stereo: bool) -> impl Iterator<Item=i16> {
    // Prime the interpolation window with the first two input frames.
    let (mut left0, mut right0) = if is_stereo {
        (input.next(), input.next())
    } else {
        let sample = input.next();
        (sample, sample)
    };
    let (mut left1, mut right1) = if is_stereo {
        (input.next(), input.next())
    } else {
        let sample = input.next();
        (sample, sample)
    };
    // Seed the output with silence instead of unwrapping, so empty input
    // doesn't panic; the iterator below terminates immediately anyway.
    let (mut left, mut right) = (left0.unwrap_or(0), right0.unwrap_or(0));
    let dt_input = 1.0 / f64::from(input_sample_rate);
    let dt_output = 1.0 / f64::from(output_sample_rate);
    let mut t = 0.0;
    let mut cur_channel = 0;
    std::iter::from_fn(move || {
        // Emit the right sample computed on the previous call.
        if cur_channel == 1 {
            cur_channel = 0;
            return Some(right);
        }

        if let (Some(l0), Some(r0), Some(l1), Some(r1)) = (left0, right0, left1, right1) {
            // Linearly interpolate between frame 0 and frame 1 at time `t`.
            let a = t / dt_input;
            let l0 = f64::from(l0);
            let l1 = f64::from(l1);
            let r0 = f64::from(r0);
            let r1 = f64::from(r1);
            left = (l0 + (l1 - l0) * a) as i16;
            right = (r0 + (r1 - r0) * a) as i16;
            t += dt_output;
            // Slide the window forward past any fully consumed input frames.
            while t >= dt_input {
                t -= dt_input;
                left0 = left1;
                right0 = right1;
                left1 = input.next();
                if is_stereo {
                    right1 = input.next();
                } else {
                    right1 = left1;
                }
            }
            cur_channel = 1;
            Some(left)
        } else {
            // Not enough input left to interpolate; end of stream.
            None
        }
    })
}