//! Storage backing the AVM2 `ByteArray` class: a growable byte buffer with a
//! read/write cursor, configurable endianness, and (de)compression helpers.
use crate::avm2::Error;
use flate2::read::*;
use flate2::Compression;
use gc_arena::Collect;
use std::cell::Cell;
use std::cmp;
use std::fmt::{self, Display, Formatter};
use std::io::prelude::*;
use std::io::{self, Read, SeekFrom};
use std::str::FromStr;
/// Byte order used when reading or writing multi-byte values.
#[derive(Clone, Collect, Debug, Copy)]
#[collect(no_drop)]
pub enum Endian {
    /// Most significant byte first (the ActionScript default).
    Big,
    /// Least significant byte first.
    Little,
}
/// Compression formats accepted by `ByteArray.compress`/`uncompress`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CompressionAlgorithm {
    /// DEFLATE data wrapped in a zlib header and checksum.
    Zlib,
    /// A raw DEFLATE stream.
    Deflate,
    /// An LZMA stream (only available with the `lzma` feature).
    Lzma,
}
impl Display for CompressionAlgorithm {
    /// Writes the lowercase ActionScript name of the algorithm.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            CompressionAlgorithm::Zlib => f.write_str("zlib"),
            CompressionAlgorithm::Deflate => f.write_str("deflate"),
            CompressionAlgorithm::Lzma => f.write_str("lzma"),
        }
    }
}
impl FromStr for CompressionAlgorithm {
    type Err = Error;

    /// Parses a lowercase algorithm name (`"zlib"`, `"deflate"`, `"lzma"`),
    /// erroring on anything else.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "zlib" => Ok(CompressionAlgorithm::Zlib),
            "deflate" => Ok(CompressionAlgorithm::Deflate),
            "lzma" => Ok(CompressionAlgorithm::Lzma),
            _ => Err("Unknown compression algorithm".into()),
        }
    }
}
/// AMF version used by `readObject`/`writeObject`.
#[derive(Clone, Collect, Debug, Copy)]
#[collect(no_drop)]
pub enum ObjectEncoding {
    /// Legacy AMF0 serialization.
    Amf0,
    /// AMF3 serialization (the AVM2 default).
    Amf3,
}
/// Backing state for one AVM2 `ByteArray`: the raw bytes plus the cursor and
/// serialization settings that scripts can configure.
#[derive(Clone, Collect, Debug)]
#[collect(no_drop)]
pub struct ByteArrayStorage {
    /// Underlying ByteArray
    bytes: Vec<u8>,

    /// The current position to read/write from.
    /// A `Cell` so reads (which take `&self`) can still advance the cursor.
    position: Cell<usize>,

    /// This represents what endian to use while reading/writing data.
    endian: Endian,

    /// The encoding used when serializing/deserializing using readObject/writeObject
    object_encoding: ObjectEncoding,
}
impl ByteArrayStorage {
/// Create a new ByteArrayStorage
pub fn new() -> ByteArrayStorage {
ByteArrayStorage {
bytes: Vec::new(),
2021-06-22 08:26:27 +00:00
position: Cell::new(0),
2021-03-05 23:01:02 +00:00
endian: Endian::Big,
object_encoding: ObjectEncoding::Amf3,
}
}
/// Create a new ByteArrayStorage using an already existing vector
pub fn from_vec(bytes: Vec<u8>) -> ByteArrayStorage {
ByteArrayStorage {
2021-08-09 21:07:32 +00:00
bytes,
position: Cell::new(0),
endian: Endian::Big,
object_encoding: ObjectEncoding::Amf3,
2021-03-05 23:01:02 +00:00
}
}
2021-06-22 08:26:27 +00:00
/// Write bytes at the next position in the ByteArray, growing if needed.
#[inline]
pub fn write_bytes(&mut self, buf: &[u8]) -> Result<(), Error> {
self.write_at(buf, self.position.get())?;
self.position.set(self.position.get() + buf.len());
Ok(())
2021-03-05 23:01:02 +00:00
}
#[inline]
pub fn write_bytes_within(&mut self, start: usize, amnt: usize) -> Result<(), Error> {
self.write_at_within(start, amnt, self.position.get())?;
self.position.set(self.position.get() + amnt);
Ok(())
}
2021-06-22 08:26:27 +00:00
/// Reads any amount of bytes from the current position in the ByteArray
#[inline]
pub fn read_bytes(&self, amnt: usize) -> Result<&[u8], Error> {
let bytes = self.read_at(amnt, self.position.get())?;
self.position.set(self.position.get() + amnt);
Ok(bytes)
}
2021-03-05 23:01:02 +00:00
2021-06-22 08:26:27 +00:00
/// Reads any amount of bytes at any offset in the ByteArray
#[inline]
pub fn read_at(&self, amnt: usize, offset: usize) -> Result<&[u8], Error> {
self.bytes
.get(offset..)
.and_then(|bytes| bytes.get(..amnt))
.ok_or_else(|| "EOFError: Reached EOF".into())
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
/// Write bytes at any offset in the ByteArray
/// Will automatically grow the ByteArray to fit the new buffer
pub fn write_at(&mut self, buf: &[u8], offset: usize) -> Result<(), Error> {
let new_len = offset
.checked_add(buf.len())
.ok_or("RangeError: Cannot overflow usize")?;
if self.len() < new_len {
self.set_length(new_len);
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
// SAFETY:
2021-08-28 23:09:31 +00:00
// 1. The amount of bytes from the start of the underyling buffer + offset is guarunteed to be able to fit the buffer we are writing because we just resized it.
// 2. The borrow checker will guaruntee that `buf` is not a slice of `self.bytes`, because we have mutable (exclusive) access to `self`.
2021-06-22 08:26:27 +00:00
unsafe {
self.bytes
.as_mut_ptr()
.add(offset)
.copy_from_nonoverlapping(buf.as_ptr(), buf.len())
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
Ok(())
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
/// Write bytes at any offset in the ByteArray
/// Will return an error if the new buffer does not fit the ByteArray
pub fn write_at_nongrowing(&mut self, buf: &[u8], offset: usize) -> Result<(), Error> {
unsafe {
self.bytes
.get_mut(offset..)
.and_then(|bytes| bytes.get_mut(..buf.len()))
.ok_or("RangeError: The specified range is invalid")?
.as_mut_ptr()
2021-08-28 23:09:31 +00:00
// SAFETY:
// 1. `buf` is garunteed to be the same length as the slice we are writing to.
// 2. The borrow checker will guaruntee that `buf` is not a slice of `self.bytes`, because we have mutable (exclusive) access to `self`.
.copy_from_nonoverlapping(buf.as_ptr(), buf.len());
}
Ok(())
}
/// Write bytes at any offset in the ByteArray from within the current ByteArray using a memmove.
/// Will automatically grow the ByteArray to fit the new buffer
pub fn write_at_within(
&mut self,
start: usize,
amnt: usize,
offset: usize,
) -> Result<(), Error> {
// First verify that reading from `start` to `amnt` is valid
start
.checked_add(amnt)
2021-08-20 00:15:32 +00:00
.filter(|result| *result <= self.len())
.ok_or("RangeError: Reached EOF")?;
// Second we resize our underlying buffer to ensure that writing `amnt` from `offset` is valid.
let new_len = offset
.checked_add(amnt)
.ok_or("RangeError: Cannot overflow usize")?;
if self.len() < new_len {
self.set_length(new_len);
}
unsafe {
let ptr = self.bytes.as_mut_ptr();
let src_ptr = ptr.add(start);
let dest_ptr = ptr.add(offset);
// SAFETY:
// 1. We validated that `start` is within the range of our underlying buffer up until `amnt`, so it is safe to read from `start` to `amnt`.
// 2. We are garunteed to have enough room in our underlying buffer for `amnt`, because we just resized it.
std::ptr::copy(src_ptr, dest_ptr, amnt);
}
2021-06-22 08:26:27 +00:00
Ok(())
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
/// Compress the ByteArray into a temporary buffer
pub fn compress(&mut self, algorithm: CompressionAlgorithm) -> Result<Vec<u8>, Error> {
2021-03-05 23:01:02 +00:00
let mut buffer = Vec::new();
2021-06-22 08:26:27 +00:00
match algorithm {
CompressionAlgorithm::Zlib => {
let mut compresser = ZlibEncoder::new(&*self.bytes, Compression::fast());
compresser.read_to_end(&mut buffer)?;
}
CompressionAlgorithm::Deflate => {
let mut compresser = DeflateEncoder::new(&*self.bytes, Compression::fast());
compresser.read_to_end(&mut buffer)?;
}
#[cfg(feature = "lzma")]
CompressionAlgorithm::Lzma => lzma_rs::lzma_compress(&mut &*self.bytes, &mut buffer)?,
#[cfg(not(feature = "lzma"))]
CompressionAlgorithm::Lzma => {
return Err("Ruffle was not compiled with LZMA support".into())
}
}
2021-03-05 23:01:02 +00:00
Ok(buffer)
}
2021-06-22 08:26:27 +00:00
/// Decompress the ByteArray into a temporary buffer
pub fn decompress(&mut self, algorithm: CompressionAlgorithm) -> Result<Vec<u8>, Error> {
2021-03-05 23:01:02 +00:00
let mut buffer = Vec::new();
2021-06-22 08:26:27 +00:00
match algorithm {
CompressionAlgorithm::Zlib => {
let mut compresser = ZlibDecoder::new(&*self.bytes);
compresser.read_to_end(&mut buffer)?;
}
CompressionAlgorithm::Deflate => {
let mut compresser = DeflateDecoder::new(&*self.bytes);
compresser.read_to_end(&mut buffer)?;
}
#[cfg(feature = "lzma")]
CompressionAlgorithm::Lzma => lzma_rs::lzma_decompress(&mut &*self.bytes, &mut buffer)?,
#[cfg(not(feature = "lzma"))]
CompressionAlgorithm::Lzma => {
return Err("Ruffle was not compiled with LZMA support".into())
}
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
Ok(buffer)
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
pub fn read_utf(&self) -> Result<String, Error> {
2021-03-05 23:01:02 +00:00
let len = self.read_unsigned_short()?;
2021-06-22 08:26:27 +00:00
let val = String::from_utf8_lossy(self.read_bytes(len.into())?);
2021-03-05 23:01:02 +00:00
Ok(val.into_owned())
}
2021-06-22 08:26:27 +00:00
pub fn write_boolean(&mut self, val: bool) -> Result<(), Error> {
self.write_bytes(&[val as u8; 1])
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
pub fn read_boolean(&self) -> Result<bool, Error> {
Ok(self.read_bytes(1)? != [0])
2021-03-05 23:01:02 +00:00
}
// Writes a UTF String into the buffer, with its length as a prefix
pub fn write_utf(&mut self, utf_string: &str) -> Result<(), Error> {
if let Ok(str_size) = u16::try_from(utf_string.len()) {
2021-06-22 08:26:27 +00:00
self.write_unsigned_short(str_size)?;
self.write_bytes(utf_string.as_bytes())
2021-03-05 23:01:02 +00:00
} else {
2021-06-22 08:26:27 +00:00
Err("RangeError: UTF String length must fit into a short".into())
2021-03-05 23:01:02 +00:00
}
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn clear(&mut self) {
self.bytes.clear();
self.position.set(0)
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn shrink_to_fit(&mut self) {
self.bytes.shrink_to_fit()
}
#[inline]
pub fn set_length(&mut self, new_len: usize) {
self.bytes.resize(new_len, 0);
}
pub fn get(&self, pos: usize) -> Option<u8> {
self.bytes.get(pos).copied()
2021-04-05 00:23:50 +00:00
}
2021-03-05 23:01:02 +00:00
pub fn set(&mut self, item: usize, value: u8) {
2021-06-22 08:26:27 +00:00
if self.len() < (item + 1) {
2021-03-05 23:01:02 +00:00
self.bytes.resize(item + 1, 0)
}
*self.bytes.get_mut(item).unwrap() = value;
}
pub fn delete(&mut self, item: usize) {
if let Some(i) = self.bytes.get_mut(item) {
*i = 0;
}
}
2021-06-22 08:26:27 +00:00
#[inline]
2021-07-22 02:45:12 +00:00
pub fn bytes(&self) -> &[u8] {
2021-03-05 23:01:02 +00:00
&self.bytes
}
#[inline]
pub fn bytes_mut(&mut self) -> &mut [u8] {
&mut self.bytes
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn len(&self) -> usize {
self.bytes.len()
}
#[inline]
2021-03-05 23:01:02 +00:00
pub fn position(&self) -> usize {
2021-06-22 08:26:27 +00:00
self.position.get()
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn set_position(&self, pos: usize) {
self.position.set(pos);
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn endian(&self) -> Endian {
self.endian
2021-03-05 23:01:02 +00:00
}
2021-06-22 08:26:27 +00:00
#[inline]
2021-03-05 23:01:02 +00:00
pub fn set_endian(&mut self, new_endian: Endian) {
self.endian = new_endian;
}
#[inline]
pub fn object_encoding(&self) -> ObjectEncoding {
self.object_encoding
}
#[inline]
pub fn set_object_encoding(&mut self, new_object_encoding: ObjectEncoding) {
self.object_encoding = new_object_encoding;
}
2021-06-22 08:26:27 +00:00
#[inline]
pub fn bytes_available(&self) -> usize {
self.len().saturating_sub(self.position.get())
}
2021-03-05 23:01:02 +00:00
}
impl Write for ByteArrayStorage {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
2021-06-22 08:26:27 +00:00
self.write_bytes(buf).map_err(|_| {
io::Error::new(io::ErrorKind::Other, "Failed to write to ByteArrayStorage")
})?;
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Read for ByteArrayStorage {
    /// Copies up to `buf.len()` bytes — bounded by the bytes remaining after
    /// the current position — into `buf`, advancing the position.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let amnt = cmp::min(buf.len(), self.bytes_available());
        match self.read_bytes(amnt) {
            Ok(bytes) => {
                buf[..bytes.len()].copy_from_slice(bytes);
                Ok(bytes.len())
            }
            Err(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "Failed to read from ByteArrayStorage",
            )),
        }
    }
}
impl Seek for ByteArrayStorage {
    /// Moves the read/write cursor. `Start` seeks are unchecked; `End` and
    /// `Current` seeks error on underflow/overflow instead of wrapping.
    fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
        let (base_pos, offset) = match style {
            SeekFrom::Start(n) => {
                // Absolute seek: set and return directly.
                // NOTE(review): `n as usize` may truncate on 32-bit targets — confirm acceptable.
                self.position.set(n as usize);
                return Ok(n);
            }
            SeekFrom::End(n) => (self.len(), n),
            SeekFrom::Current(n) => (self.position.get(), n),
        };
        let new_pos = if offset >= 0 {
            base_pos.checked_add(offset as usize)
        } else {
            // `wrapping_neg` yields the magnitude even for i64::MIN (whose
            // plain negation would overflow); the cast reinterprets it as
            // the unsigned distance to subtract.
            base_pos.checked_sub((offset.wrapping_neg()) as usize)
        };
        match new_pos {
            Some(n) => {
                self.position.set(n);
                Ok(n as u64)
            }
            None => Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid seek to a negative or overflowing position",
            )),
        }
    }
}
/// Generates endian-aware `write_*` methods on `ByteArrayStorage`, one per
/// `method_name data_type` pair, each serializing `val` with the currently
/// configured endianness and appending it at the cursor.
macro_rules! impl_write{
    ($($method_name:ident $data_type:ty ), *)
    =>
    {
        impl ByteArrayStorage {
            $( pub fn $method_name (&mut self, val: $data_type) -> Result<(), Error> {
                // Serialize in whichever byte order the script selected.
                let val_bytes = match self.endian {
                    Endian::Big => val.to_be_bytes(),
                    Endian::Little => val.to_le_bytes(),
                };
                self.write_bytes(&val_bytes)
            } )*
        }
    }
}
/// Generates endian-aware `read_*` methods on `ByteArrayStorage`, one per
/// `method_name size; data_type` triple, each consuming `size` bytes from the
/// cursor and decoding them with the currently configured endianness.
macro_rules! impl_read{
    ($($method_name:ident $size:expr; $data_type:ty ), *)
    =>
    {
        impl ByteArrayStorage {
            $( pub fn $method_name (&self) -> Result<$data_type, Error> {
                Ok(match self.endian {
                    // `try_into` to a fixed-size array cannot fail here:
                    // `read_bytes($size)` returns exactly `$size` bytes.
                    Endian::Big => <$data_type>::from_be_bytes(self.read_bytes($size)?.try_into().unwrap()),
                    Endian::Little => <$data_type>::from_le_bytes(self.read_bytes($size)?.try_into().unwrap())
                })
            } )*
        }
    }
}
// Concrete typed writers: floats, doubles, and 16/32-bit integers.
impl_write!(write_float f32, write_double f64, write_int i32, write_unsigned_int u32, write_short i16, write_unsigned_short u16);
// Concrete typed readers; the number before each type is the byte width read.
impl_read!(read_float 4; f32, read_double 8; f64, read_int 4; i32, read_unsigned_int 4; u32, read_short 2; i16, read_unsigned_short 2; u16, read_byte 1; i8, read_unsigned_byte 1; u8);
impl Default for ByteArrayStorage {
fn default() -> Self {
Self::new()
}
}