chore: Add rustfmt.toml and rustfmt pass

Mike Welsh 2019-08-26 16:38:37 -07:00
parent 06d9f39c0e
commit 6a5c5ab1df
21 changed files with 6159 additions and 6094 deletions


@@ -31,7 +31,10 @@ impl TransformStack {
         let cur_transform = self.transform();
         let matrix = cur_transform.matrix * transform.matrix;
         let color_transform = cur_transform.color_transform * transform.color_transform;
-        self.0.push(Transform { matrix, color_transform });
+        self.0.push(Transform {
+            matrix,
+            color_transform,
+        });
     }

     pub fn pop(&mut self) {

rustfmt.toml Normal file

@@ -0,0 +1 @@
+newline_style = "Unix"


@@ -1,8 +1,8 @@
 use fnv::FnvHashMap;
 use generational_arena::Arena;
 use ruffle_core::backend::audio::decoders::{AdpcmDecoder, Mp3Decoder};
-use ruffle_core::backend::audio::{AudioBackend, AudioStreamHandle, SoundHandle};
 use ruffle_core::backend::audio::swf::{self, AudioCompression};
+use ruffle_core::backend::audio::{AudioBackend, AudioStreamHandle, SoundHandle};
 use std::cell::{Cell, RefCell};
 use std::rc::Rc;
 use wasm_bindgen::{closure::Closure, JsCast};
@@ -46,12 +46,17 @@ struct Sound {
     source: SoundSource,
 }

-type Decoder = Box<dyn Iterator<Item=i16>>;
+type Decoder = Box<dyn Iterator<Item = i16>>;

 #[allow(dead_code)]
 enum AudioStream {
-    Decoder { decoder: Decoder, is_stereo: bool, },// closure: Option<Closure<Box<FnMut(web_sys::AudioProcessingEvent)>>> } ,
-    AudioBuffer { node: web_sys::AudioBufferSourceNode },
+    Decoder {
+        decoder: Decoder,
+        is_stereo: bool,
+    }, // closure: Option<Closure<Box<FnMut(web_sys::AudioProcessingEvent)>>> } ,
+    AudioBuffer {
+        node: web_sys::AudioBufferSourceNode,
+    },
 }

 type Error = Box<dyn std::error::Error>;
@@ -76,14 +81,11 @@ impl WebAudioBackend
                 let audio_buffer = audio_buffer.borrow();
                 let node = self.context.create_buffer_source().unwrap();
                 node.set_buffer(Some(&*audio_buffer));
-                node
-                    .connect_with_audio_node(&self.context.destination())
+                node.connect_with_audio_node(&self.context.destination())
                     .unwrap();
                 node.start().unwrap();
-                let audio_stream = AudioStream::AudioBuffer {
-                    node
-                };
+                let audio_stream = AudioStream::AudioBuffer { node };

                 STREAMS.with(|streams| {
                     let mut streams = streams.borrow_mut();
                     streams.insert(audio_stream)
@@ -91,28 +93,33 @@ impl WebAudioBackend
             }
             SoundSource::Decoder(audio_data) => {
                 let decoder: Decoder = match sound.format.compression {
-                    AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
-                        std::io::Cursor::new(audio_data.to_vec()),
-                        sound.format.is_stereo,
-                        sound.format.sample_rate
-                    ).unwrap()),
+                    AudioCompression::Adpcm => Box::new(
+                        AdpcmDecoder::new(
+                            std::io::Cursor::new(audio_data.to_vec()),
+                            sound.format.is_stereo,
+                            sound.format.sample_rate,
+                        )
+                        .unwrap(),
+                    ),
                     AudioCompression::Mp3 => Box::new(Mp3Decoder::new(
-                        if sound.format.is_stereo {
-                            2
-                        } else {
-                            1
-                        },
+                        if sound.format.is_stereo { 2 } else { 1 },
                         sound.format.sample_rate.into(),
-                        std::io::Cursor::new(audio_data.to_vec())//&sound.data[..]
+                        std::io::Cursor::new(audio_data.to_vec()), //&sound.data[..]
                     )),
-                    _ => unimplemented!()
+                    _ => unimplemented!(),
                 };

-                let decoder: Decoder = if sound.format.sample_rate != self.context.sample_rate() as u16 {
-                    Box::new(resample(decoder, sound.format.sample_rate, self.context.sample_rate() as u16, sound.format.is_stereo))
-                } else {
-                    decoder
-                };
+                let decoder: Decoder =
+                    if sound.format.sample_rate != self.context.sample_rate() as u16 {
+                        Box::new(resample(
+                            decoder,
+                            sound.format.sample_rate,
+                            self.context.sample_rate() as u16,
+                            sound.format.is_stereo,
+                        ))
+                    } else {
+                        decoder
+                    };

                 let audio_stream = AudioStream::Decoder {
                     decoder,
@@ -124,7 +131,6 @@ impl WebAudioBackend
                     let stream_handle = streams.insert(audio_stream);
                     let script_processor_node = self.context.create_script_processor_with_buffer_size_and_number_of_input_channels_and_number_of_output_channels(4096, 0, if sound.format.is_stereo { 2 } else { 1 }).unwrap();
                     let script_node = script_processor_node.clone();
-
                     let closure = Closure::wrap(Box::new(move |event| {
                         STREAMS.with(|streams| {
                             let mut streams = streams.borrow_mut();
@@ -146,7 +152,12 @@ impl WebAudioBackend
         }
     }

-    fn decompress_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], num_sample_frames: u32) -> AudioBufferPtr {
+    fn decompress_to_audio_buffer(
+        &mut self,
+        format: &swf::SoundFormat,
+        audio_data: &[u8],
+        num_sample_frames: u32,
+    ) -> AudioBufferPtr {
         if format.compression == AudioCompression::Mp3 {
             return self.decompress_mp3_to_audio_buffer(format, audio_data, num_sample_frames);
         }
@@ -154,32 +165,45 @@ impl WebAudioBackend
         // This sucks. Firefox doesn't like 5512Hz sample rate, so manually double up the samples.
         // 5512Hz should be relatively rare.
         let audio_buffer = if format.sample_rate > 5512 {
-            self.context.create_buffer(
-                if format.is_stereo { 2 } else { 1 },
-                num_sample_frames,
-                f32::from(format.sample_rate)
-            ).unwrap()
+            self.context
+                .create_buffer(
+                    if format.is_stereo { 2 } else { 1 },
+                    num_sample_frames,
+                    f32::from(format.sample_rate),
+                )
+                .unwrap()
         } else {
-            self.context.create_buffer(
-                if format.is_stereo { 2 } else { 1 },
-                num_sample_frames * 2,
-                11025.0
-            ).unwrap()
+            self.context
+                .create_buffer(
+                    if format.is_stereo { 2 } else { 1 },
+                    num_sample_frames * 2,
+                    11025.0,
+                )
+                .unwrap()
         };

         match format.compression {
             AudioCompression::Uncompressed => {
                 // TODO: Check for is_16_bit.
-                self.left_samples = audio_data.iter().step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
+                self.left_samples = audio_data
+                    .iter()
+                    .step_by(2)
+                    .cloned()
+                    .map(|n| f32::from(n) / 32767.0)
+                    .collect();
                 if format.is_stereo {
-                    self.right_samples = audio_data.iter().skip(1).step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
+                    self.right_samples = audio_data
+                        .iter()
+                        .skip(1)
+                        .step_by(2)
+                        .cloned()
+                        .map(|n| f32::from(n) / 32767.0)
+                        .collect();
                 }
             }
             AudioCompression::Adpcm => {
-                let mut decoder = AdpcmDecoder::new(audio_data,
-                    format.is_stereo,
-                    format.sample_rate
-                ).unwrap();
+                let mut decoder =
+                    AdpcmDecoder::new(audio_data, format.is_stereo, format.sample_rate).unwrap();
                 if format.is_stereo {
                     while let (Some(l), Some(r)) = (decoder.next(), decoder.next()) {
                         self.left_samples.push(f32::from(l) / 32767.0);
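The comment in this hunk explains the 5512 Hz workaround: instead of asking the browser for a 5512 Hz buffer, the backend allocates twice the frames at 11025 Hz and writes every sample twice. The doubling itself is not part of this hunk; the sketch below is only an illustration of that idea, and its function name is made up for the example.

// Illustrative sketch only (not code from this commit): duplicating each decoded
// sample turns a 5512 Hz signal into an 11025 Hz buffer of twice the length.
fn double_samples(samples_5512hz: &[f32]) -> Vec<f32> {
    let mut doubled = Vec::with_capacity(samples_5512hz.len() * 2);
    for &sample in samples_5512hz {
        doubled.push(sample);
        doubled.push(sample);
    }
    doubled
}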
@@ -211,18 +235,30 @@ impl WebAudioBackend
             }
         }

-        audio_buffer.copy_to_channel(&mut self.left_samples, 0).unwrap();
+        audio_buffer
+            .copy_to_channel(&mut self.left_samples, 0)
+            .unwrap();
         if format.is_stereo {
-            audio_buffer.copy_to_channel(&mut self.right_samples, 1).unwrap();
+            audio_buffer
+                .copy_to_channel(&mut self.right_samples, 1)
+                .unwrap();
         }

         Rc::new(RefCell::new(audio_buffer))
     }

-    fn decompress_mp3_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], _num_sample_frames: u32) -> AudioBufferPtr {
+    fn decompress_mp3_to_audio_buffer(
+        &mut self,
+        format: &swf::SoundFormat,
+        audio_data: &[u8],
+        _num_sample_frames: u32,
+    ) -> AudioBufferPtr {
         // We use the Web decodeAudioData API to decode MP3 data.
         // TODO: Is it possible we finish loading before the MP3 is decoding?
-        let audio_buffer = self.context.create_buffer(1, 1, self.context.sample_rate()).unwrap();
+        let audio_buffer = self
+            .context
+            .create_buffer(1, 1, self.context.sample_rate())
+            .unwrap();
         let audio_buffer = Rc::new(RefCell::new(audio_buffer));

         let data_array = unsafe { js_sys::Uint8Array::view(&audio_data[..]) };
@@ -238,18 +274,18 @@ impl WebAudioBackend
         let success_closure = Closure::wrap(Box::new(move |buffer: web_sys::AudioBuffer| {
             *buffer_ptr.borrow_mut() = buffer;
             NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
-        })
-            as Box<dyn FnMut(web_sys::AudioBuffer)>);
+        }) as Box<dyn FnMut(web_sys::AudioBuffer)>);
         let error_closure = Closure::wrap(Box::new(move || {
             log::info!("Error decoding MP3 audio");
             NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
-        })
-            as Box<dyn FnMut()>);
-        self.context.decode_audio_data_with_success_callback_and_error_callback(
-            &array_buffer,
-            success_closure.as_ref().unchecked_ref(),
-            error_closure.as_ref().unchecked_ref()
-        ).unwrap();
+        }) as Box<dyn FnMut()>);
+        self.context
+            .decode_audio_data_with_success_callback_and_error_callback(
+                &array_buffer,
+                success_closure.as_ref().unchecked_ref(),
+                error_closure.as_ref().unchecked_ref(),
+            )
+            .unwrap();

         // TODO: This will leak memory (once per decompressed MP3).
         // Not a huge deal as there are probably not many MP3s in an SWF.
@@ -266,7 +302,10 @@ impl WebAudioBackend
         let mut complete = false;
         let mut left_samples = vec![];
         let mut right_samples = vec![];
-        if let AudioStream::Decoder { decoder, is_stereo, .. } = audio_stream {
+        if let AudioStream::Decoder {
+            decoder, is_stereo, ..
+        } = audio_stream
+        {
             let output_buffer = event.output_buffer().unwrap();
             let num_frames = output_buffer.length() as usize;
@@ -281,9 +320,13 @@ impl WebAudioBackend
                     break;
                 }
             }

-            output_buffer.copy_to_channel(&mut left_samples[..], 0).unwrap();
+            output_buffer
+                .copy_to_channel(&mut left_samples[..], 0)
+                .unwrap();
             if *is_stereo {
-                output_buffer.copy_to_channel(&mut right_samples[..], 1).unwrap();
+                output_buffer
+                    .copy_to_channel(&mut right_samples[..], 1)
+                    .unwrap();
             }
         }
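The loop that drains the boxed decoder into `left_samples` and `right_samples` sits between the two hunks above and is not shown in this diff. As a rough, self-contained sketch of what such a fill loop amounts to (all names here are hypothetical, not the commit's code):

// Illustrative sketch only (not code from this commit): pull one audio quantum's
// worth of i16 samples from a decoder and convert them to f32 channel data.
// Returns true when the decoder is exhausted.
fn fill_quantum(
    decoder: &mut dyn Iterator<Item = i16>,
    is_stereo: bool,
    num_frames: usize,
    left_samples: &mut Vec<f32>,
    right_samples: &mut Vec<f32>,
) -> bool {
    for _ in 0..num_frames {
        let left = match decoder.next() {
            Some(sample) => sample,
            None => return true, // stream finished
        };
        left_samples.push(f32::from(left) / 32767.0);
        if is_stereo {
            if let Some(right) = decoder.next() {
                right_samples.push(f32::from(right) / 32767.0);
            }
        }
    }
    false
}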
@@ -302,32 +345,42 @@ impl AudioBackend for WebAudioBackend
         let sound = Sound {
             format: sound.format.clone(),
-            source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(&sound.format, data, sound.num_samples)),
+            source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(
+                &sound.format,
+                data,
+                sound.num_samples,
+            )),
         };
         Ok(self.sounds.insert(sound))
     }

-    fn preload_sound_stream_head(&mut self, clip_id: swf::CharacterId, stream_info: &swf::SoundStreamHead) {
-        self.stream_data.entry(clip_id).or_insert_with(|| {
-            StreamData {
+    fn preload_sound_stream_head(
+        &mut self,
+        clip_id: swf::CharacterId,
+        stream_info: &swf::SoundStreamHead,
+    ) {
+        self.stream_data
+            .entry(clip_id)
+            .or_insert_with(|| StreamData {
                 format: stream_info.stream_format.clone(),
                 audio_data: vec![],
                 num_sample_frames: 0,
                 samples_per_block: stream_info.num_samples_per_block.into(),
-            }
-        });
+            });
     }

     fn preload_sound_stream_block(&mut self, clip_id: swf::CharacterId, audio_data: &[u8]) {
         if let Some(stream) = self.stream_data.get_mut(&clip_id) {
             match stream.format.compression {
                 AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
-                    let frame_len = if stream.format.is_stereo { 2 } else { 1 } * if stream.format.is_16_bit { 2 } else { 1 };
+                    let frame_len = if stream.format.is_stereo { 2 } else { 1 }
+                        * if stream.format.is_16_bit { 2 } else { 1 };
                     stream.num_sample_frames += (audio_data.len() as u32) / frame_len;
                     stream.audio_data.extend_from_slice(audio_data);
                 }
                 AudioCompression::Mp3 => {
-                    let num_sample_frames = (u32::from(audio_data[2]) << 8) | u32::from(audio_data[3]);
+                    let num_sample_frames =
+                        (u32::from(audio_data[2]) << 8) | u32::from(audio_data[3]);
                     stream.num_sample_frames += num_sample_frames;
                     // MP3 streaming data:
                     // First two bytes = number of samples
@@ -344,9 +397,12 @@ impl AudioBackend for WebAudioBackend
     fn preload_sound_stream_end(&mut self, clip_id: swf::CharacterId) {
         if let Some(stream) = self.stream_data.remove(&clip_id) {
-            if !stream.audio_data.is_empty()
-            {
-                let audio_buffer = self.decompress_to_audio_buffer(&stream.format, &stream.audio_data[..], stream.num_sample_frames);
+            if !stream.audio_data.is_empty() {
+                let audio_buffer = self.decompress_to_audio_buffer(
+                    &stream.format,
+                    &stream.audio_data[..],
+                    stream.num_sample_frames,
+                );
                 let handle = self.sounds.insert(Sound {
                     format: stream.format,
                     source: SoundSource::AudioBuffer(audio_buffer),
@@ -387,7 +443,12 @@ impl AudioBackend for WebAudioBackend
 // Janky resmapling code.
 // TODO: Clean this up.
-fn resample(mut input: impl Iterator<Item=i16>, input_sample_rate: u16, output_sample_rate: u16, is_stereo: bool) -> impl Iterator<Item=i16> {
+fn resample(
+    mut input: impl Iterator<Item = i16>,
+    input_sample_rate: u16,
+    output_sample_rate: u16,
+    is_stereo: bool,
+) -> impl Iterator<Item = i16> {
     let (mut left0, mut right0) = if is_stereo {
         (input.next(), input.next())
     } else {