chore: Add rustfmt.toml and rustfmt pass

This commit is contained in:
Mike Welsh 2019-08-26 16:38:37 -07:00
parent 06d9f39c0e
commit 6a5c5ab1df
21 changed files with 6159 additions and 6094 deletions

View File

@ -31,7 +31,10 @@ impl TransformStack {
let cur_transform = self.transform();
let matrix = cur_transform.matrix * transform.matrix;
let color_transform = cur_transform.color_transform * transform.color_transform;
self.0.push(Transform { matrix, color_transform });
self.0.push(Transform {
matrix,
color_transform,
});
}
pub fn pop(&mut self) {

1
rustfmt.toml Normal file
View File

@ -0,0 +1 @@
newline_style = "Unix"

View File

@ -1,8 +1,8 @@
use fnv::FnvHashMap;
use generational_arena::Arena;
use ruffle_core::backend::audio::decoders::{AdpcmDecoder, Mp3Decoder};
use ruffle_core::backend::audio::{AudioBackend, AudioStreamHandle, SoundHandle};
use ruffle_core::backend::audio::swf::{self, AudioCompression};
use ruffle_core::backend::audio::{AudioBackend, AudioStreamHandle, SoundHandle};
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use wasm_bindgen::{closure::Closure, JsCast};
@ -46,12 +46,17 @@ struct Sound {
source: SoundSource,
}
type Decoder = Box<dyn Iterator<Item=i16>>;
type Decoder = Box<dyn Iterator<Item = i16>>;
#[allow(dead_code)]
enum AudioStream {
Decoder { decoder: Decoder, is_stereo: bool, },// closure: Option<Closure<Box<FnMut(web_sys::AudioProcessingEvent)>>> } ,
AudioBuffer { node: web_sys::AudioBufferSourceNode },
Decoder {
decoder: Decoder,
is_stereo: bool,
}, // closure: Option<Closure<Box<FnMut(web_sys::AudioProcessingEvent)>>> } ,
AudioBuffer {
node: web_sys::AudioBufferSourceNode,
},
}
type Error = Box<dyn std::error::Error>;
@ -76,14 +81,11 @@ impl WebAudioBackend {
let audio_buffer = audio_buffer.borrow();
let node = self.context.create_buffer_source().unwrap();
node.set_buffer(Some(&*audio_buffer));
node
.connect_with_audio_node(&self.context.destination())
node.connect_with_audio_node(&self.context.destination())
.unwrap();
node.start().unwrap();
let audio_stream = AudioStream::AudioBuffer {
node
};
let audio_stream = AudioStream::AudioBuffer { node };
STREAMS.with(|streams| {
let mut streams = streams.borrow_mut();
streams.insert(audio_stream)
@ -91,25 +93,30 @@ impl WebAudioBackend {
}
SoundSource::Decoder(audio_data) => {
let decoder: Decoder = match sound.format.compression {
AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
AudioCompression::Adpcm => Box::new(
AdpcmDecoder::new(
std::io::Cursor::new(audio_data.to_vec()),
sound.format.is_stereo,
sound.format.sample_rate
).unwrap()),
sound.format.sample_rate,
)
.unwrap(),
),
AudioCompression::Mp3 => Box::new(Mp3Decoder::new(
if sound.format.is_stereo {
2
} else {
1
},
if sound.format.is_stereo { 2 } else { 1 },
sound.format.sample_rate.into(),
std::io::Cursor::new(audio_data.to_vec())//&sound.data[..]
std::io::Cursor::new(audio_data.to_vec()), //&sound.data[..]
)),
_ => unimplemented!()
_ => unimplemented!(),
};
let decoder: Decoder = if sound.format.sample_rate != self.context.sample_rate() as u16 {
Box::new(resample(decoder, sound.format.sample_rate, self.context.sample_rate() as u16, sound.format.is_stereo))
let decoder: Decoder =
if sound.format.sample_rate != self.context.sample_rate() as u16 {
Box::new(resample(
decoder,
sound.format.sample_rate,
self.context.sample_rate() as u16,
sound.format.is_stereo,
))
} else {
decoder
};
@ -124,7 +131,6 @@ impl WebAudioBackend {
let stream_handle = streams.insert(audio_stream);
let script_processor_node = self.context.create_script_processor_with_buffer_size_and_number_of_input_channels_and_number_of_output_channels(4096, 0, if sound.format.is_stereo { 2 } else { 1 }).unwrap();
let script_node = script_processor_node.clone();
let closure = Closure::wrap(Box::new(move |event| {
STREAMS.with(|streams| {
let mut streams = streams.borrow_mut();
@ -146,7 +152,12 @@ impl WebAudioBackend {
}
}
fn decompress_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], num_sample_frames: u32) -> AudioBufferPtr {
fn decompress_to_audio_buffer(
&mut self,
format: &swf::SoundFormat,
audio_data: &[u8],
num_sample_frames: u32,
) -> AudioBufferPtr {
if format.compression == AudioCompression::Mp3 {
return self.decompress_mp3_to_audio_buffer(format, audio_data, num_sample_frames);
}
@ -154,32 +165,45 @@ impl WebAudioBackend {
// This sucks. Firefox doesn't like 5512Hz sample rate, so manually double up the samples.
// 5512Hz should be relatively rare.
let audio_buffer = if format.sample_rate > 5512 {
self.context.create_buffer(
self.context
.create_buffer(
if format.is_stereo { 2 } else { 1 },
num_sample_frames,
f32::from(format.sample_rate)
).unwrap()
f32::from(format.sample_rate),
)
.unwrap()
} else {
self.context.create_buffer(
self.context
.create_buffer(
if format.is_stereo { 2 } else { 1 },
num_sample_frames * 2,
11025.0
).unwrap()
11025.0,
)
.unwrap()
};
match format.compression {
AudioCompression::Uncompressed => {
// TODO: Check for is_16_bit.
self.left_samples = audio_data.iter().step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
self.left_samples = audio_data
.iter()
.step_by(2)
.cloned()
.map(|n| f32::from(n) / 32767.0)
.collect();
if format.is_stereo {
self.right_samples = audio_data.iter().skip(1).step_by(2).cloned().map(|n| f32::from(n) / 32767.0).collect();
self.right_samples = audio_data
.iter()
.skip(1)
.step_by(2)
.cloned()
.map(|n| f32::from(n) / 32767.0)
.collect();
}
}
AudioCompression::Adpcm => {
let mut decoder = AdpcmDecoder::new(audio_data,
format.is_stereo,
format.sample_rate
).unwrap();
let mut decoder =
AdpcmDecoder::new(audio_data, format.is_stereo, format.sample_rate).unwrap();
if format.is_stereo {
while let (Some(l), Some(r)) = (decoder.next(), decoder.next()) {
self.left_samples.push(f32::from(l) / 32767.0);
@ -211,18 +235,30 @@ impl WebAudioBackend {
}
}
audio_buffer.copy_to_channel(&mut self.left_samples, 0).unwrap();
audio_buffer
.copy_to_channel(&mut self.left_samples, 0)
.unwrap();
if format.is_stereo {
audio_buffer.copy_to_channel(&mut self.right_samples, 1).unwrap();
audio_buffer
.copy_to_channel(&mut self.right_samples, 1)
.unwrap();
}
Rc::new(RefCell::new(audio_buffer))
}
fn decompress_mp3_to_audio_buffer(&mut self, format: &swf::SoundFormat, audio_data: &[u8], _num_sample_frames: u32) -> AudioBufferPtr {
fn decompress_mp3_to_audio_buffer(
&mut self,
format: &swf::SoundFormat,
audio_data: &[u8],
_num_sample_frames: u32,
) -> AudioBufferPtr {
// We use the Web decodeAudioData API to decode MP3 data.
// TODO: Is it possible we finish loading before the MP3 is decoding?
let audio_buffer = self.context.create_buffer(1, 1, self.context.sample_rate()).unwrap();
let audio_buffer = self
.context
.create_buffer(1, 1, self.context.sample_rate())
.unwrap();
let audio_buffer = Rc::new(RefCell::new(audio_buffer));
let data_array = unsafe { js_sys::Uint8Array::view(&audio_data[..]) };
@ -238,18 +274,18 @@ impl WebAudioBackend {
let success_closure = Closure::wrap(Box::new(move |buffer: web_sys::AudioBuffer| {
*buffer_ptr.borrow_mut() = buffer;
NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
})
as Box<dyn FnMut(web_sys::AudioBuffer)>);
}) as Box<dyn FnMut(web_sys::AudioBuffer)>);
let error_closure = Closure::wrap(Box::new(move || {
log::info!("Error decoding MP3 audio");
NUM_SOUNDS_LOADING.with(|n| n.set(n.get() - 1));
})
as Box<dyn FnMut()>);
self.context.decode_audio_data_with_success_callback_and_error_callback(
}) as Box<dyn FnMut()>);
self.context
.decode_audio_data_with_success_callback_and_error_callback(
&array_buffer,
success_closure.as_ref().unchecked_ref(),
error_closure.as_ref().unchecked_ref()
).unwrap();
error_closure.as_ref().unchecked_ref(),
)
.unwrap();
// TODO: This will leak memory (once per decompressed MP3).
// Not a huge deal as there are probably not many MP3s in an SWF.
@ -266,7 +302,10 @@ impl WebAudioBackend {
let mut complete = false;
let mut left_samples = vec![];
let mut right_samples = vec![];
if let AudioStream::Decoder { decoder, is_stereo, .. } = audio_stream {
if let AudioStream::Decoder {
decoder, is_stereo, ..
} = audio_stream
{
let output_buffer = event.output_buffer().unwrap();
let num_frames = output_buffer.length() as usize;
@ -281,9 +320,13 @@ impl WebAudioBackend {
break;
}
}
output_buffer.copy_to_channel(&mut left_samples[..], 0).unwrap();
output_buffer
.copy_to_channel(&mut left_samples[..], 0)
.unwrap();
if *is_stereo {
output_buffer.copy_to_channel(&mut right_samples[..], 1).unwrap();
output_buffer
.copy_to_channel(&mut right_samples[..], 1)
.unwrap();
}
}
@ -302,19 +345,27 @@ impl AudioBackend for WebAudioBackend {
let sound = Sound {
format: sound.format.clone(),
source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(&sound.format, data, sound.num_samples)),
source: SoundSource::AudioBuffer(self.decompress_to_audio_buffer(
&sound.format,
data,
sound.num_samples,
)),
};
Ok(self.sounds.insert(sound))
}
fn preload_sound_stream_head(&mut self, clip_id: swf::CharacterId, stream_info: &swf::SoundStreamHead) {
self.stream_data.entry(clip_id).or_insert_with(|| {
StreamData {
fn preload_sound_stream_head(
&mut self,
clip_id: swf::CharacterId,
stream_info: &swf::SoundStreamHead,
) {
self.stream_data
.entry(clip_id)
.or_insert_with(|| StreamData {
format: stream_info.stream_format.clone(),
audio_data: vec![],
num_sample_frames: 0,
samples_per_block: stream_info.num_samples_per_block.into(),
}
});
}
@ -322,12 +373,14 @@ impl AudioBackend for WebAudioBackend {
if let Some(stream) = self.stream_data.get_mut(&clip_id) {
match stream.format.compression {
AudioCompression::Uncompressed | AudioCompression::UncompressedUnknownEndian => {
let frame_len = if stream.format.is_stereo { 2 } else { 1 } * if stream.format.is_16_bit { 2 } else { 1 };
let frame_len = if stream.format.is_stereo { 2 } else { 1 }
* if stream.format.is_16_bit { 2 } else { 1 };
stream.num_sample_frames += (audio_data.len() as u32) / frame_len;
stream.audio_data.extend_from_slice(audio_data);
}
AudioCompression::Mp3 => {
let num_sample_frames = (u32::from(audio_data[2]) << 8) | u32::from(audio_data[3]);
let num_sample_frames =
(u32::from(audio_data[2]) << 8) | u32::from(audio_data[3]);
stream.num_sample_frames += num_sample_frames;
// MP3 streaming data:
// First two bytes = number of samples
@ -344,9 +397,12 @@ impl AudioBackend for WebAudioBackend {
fn preload_sound_stream_end(&mut self, clip_id: swf::CharacterId) {
if let Some(stream) = self.stream_data.remove(&clip_id) {
if !stream.audio_data.is_empty()
{
let audio_buffer = self.decompress_to_audio_buffer(&stream.format, &stream.audio_data[..], stream.num_sample_frames);
if !stream.audio_data.is_empty() {
let audio_buffer = self.decompress_to_audio_buffer(
&stream.format,
&stream.audio_data[..],
stream.num_sample_frames,
);
let handle = self.sounds.insert(Sound {
format: stream.format,
source: SoundSource::AudioBuffer(audio_buffer),
@ -387,7 +443,12 @@ impl AudioBackend for WebAudioBackend {
// Janky resampling code.
// TODO: Clean this up.
fn resample(mut input: impl Iterator<Item=i16>, input_sample_rate: u16, output_sample_rate: u16, is_stereo: bool) -> impl Iterator<Item=i16> {
fn resample(
mut input: impl Iterator<Item = i16>,
input_sample_rate: u16,
output_sample_rate: u16,
is_stereo: bool,
) -> impl Iterator<Item = i16> {
let (mut left0, mut right0) = if is_stereo {
(input.next(), input.next())
} else {