Segfault-based memory bounds-checking. (#29)

* Mostly working

* Improve CircleCI releases

* Recover safely from WebAssembly-generated traps

* Add notes about async signal safety

* Improved trap catching with call_protected macro

* Improved test assert_trap code using call_protected macro

* Mostly working
This commit is contained in:
Lachlan Sneff 2018-11-25 13:51:21 -05:00 committed by Syrus Akbary
parent c62e9455e1
commit 399d253bc1
8 changed files with 91 additions and 106 deletions

View File

@@ -8,9 +8,8 @@ pub use libc::putchar;
/// printf
pub extern "C" fn printf(memory_offset: i32, extra: i32, instance: &Instance) -> i32 {
debug!("emscripten::printf");
let mem = &instance.memories[0];
return unsafe {
let base_memory_offset = mem.mmap.as_ptr().offset(memory_offset as isize) as *const i8;
_printf(base_memory_offset, extra)
};
unsafe {
let addr = instance.memory_offset_addr(0, memory_offset as _) as _;
_printf(addr, extra)
}
}

View File

@@ -21,7 +21,7 @@ pub extern "C" fn _emscripten_memcpy_big(
/// emscripten: getTotalMemory
pub extern "C" fn get_total_memory(instance: &mut Instance) -> u32 {
debug!("emscripten::get_total_memory");
instance.memories[0].current_size()
instance.memories[0].current_pages()
}
/// emscripten: enlargeMemory

View File

@@ -3,7 +3,7 @@ use std::mem;
#[repr(transparent)]
pub struct VarArgs {
pointer: u32, // assuming 32bit wasm
pub pointer: u32, // assuming 32bit wasm
}
impl VarArgs {

View File

@@ -31,6 +31,7 @@ use super::module::{Export, ImportableExportable, Module};
use super::relocation::{Reloc, RelocSink, RelocationType};
type TablesSlice = UncheckedSlice<BoundedSlice<usize>>;
// TODO: this should be `type MemoriesSlice = UncheckedSlice<UncheckedSlice<u8>>;`, but that crashes for some reason.
type MemoriesSlice = UncheckedSlice<BoundedSlice<u8>>;
type GlobalsSlice = UncheckedSlice<u8>;
@@ -454,8 +455,14 @@ impl Instance {
for init in &module.info.data_initializers {
debug_assert!(init.base.is_none(), "globalvar base not supported yet");
let offset = init.offset;
let mem_mut = memories[init.memory_index.index()].as_mut();
let to_init = &mut mem_mut[offset..offset + init.data.len()];
let mem = &mut memories[init.memory_index.index()];
let end_of_init = offset + init.data.len();
if end_of_init > mem.current_size() {
let grow_pages = (end_of_init / LinearMemory::WASM_PAGE_SIZE) + 1;
mem.grow(grow_pages as u32)
.expect("failed to grow memory for data initializers");
}
let to_init = &mut mem[offset..offset + init.data.len()];
to_init.copy_from_slice(&init.data);
}
}
@@ -477,7 +484,7 @@ impl Instance {
.map(|mem| {
BoundedSlice::new(
&mem[..],
mem.current as usize * LinearMemory::WASM_PAGE_SIZE,
mem.current_size(),
)
})
.collect();
@@ -539,8 +546,9 @@ impl Instance {
}
pub fn memory_offset_addr(&self, index: usize, offset: usize) -> *const usize {
let mem = &self.memories[index];
unsafe { mem.mmap.as_ptr().offset(offset as isize) as *const usize }
let memories: &[LinearMemory] = &self.memories[..];
let mem = &memories[index];
unsafe { mem[..].as_ptr().add(offset) as *const usize }
}
// Shows the value of a global variable.
@@ -568,21 +576,10 @@ extern "C" fn grow_memory(size: u32, memory_index: u32, instance: &mut Instance)
.grow(size)
.unwrap_or(-1);
if old_mem_size != -1 {
// Get new memory bytes
let new_mem_bytes = (old_mem_size as usize + size as usize) * LinearMemory::WASM_PAGE_SIZE;
// Update data_pointer
instance
.data_pointers
.memories
.get_unchecked_mut(memory_index as usize)
.len = new_mem_bytes;
}
old_mem_size
}
extern "C" fn current_memory(memory_index: u32, instance: &mut Instance) -> u32 {
let memory = &instance.memories[memory_index as usize];
memory.current_size() as u32
memory.current_pages() as u32
}

View File

@@ -3,27 +3,27 @@
//! webassembly::Instance.
//! A memory created by Rust or in WebAssembly code will be accessible and
//! mutable from both Rust and WebAssembly.
use memmap::MmapMut;
use std::fmt;
use nix::sys::mman::{mmap, MapFlags, ProtFlags};
use nix::libc::{c_void, mprotect, PROT_READ, PROT_WRITE};
use std::slice;
use std::ops::{Deref, DerefMut};
const PAGE_SIZE: u32 = 65536;
const MAX_PAGES: u32 = 65536;
/// A linear memory instance.
///
//
#[derive(Debug)]
pub struct LinearMemory {
pub mmap: MmapMut,
// The initial size of the WebAssembly Memory, in units of
// WebAssembly pages.
pub current: u32,
base: *mut c_void, // The size will always be `LinearMemory::DEFAULT_SIZE`
current: u32, // current number of wasm pages
// The maximum size the WebAssembly Memory is allowed to grow
// to, in units of WebAssembly pages. When present, the maximum
// parameter acts as a hint to the engine to reserve memory up
// front. However, the engine may ignore or clamp this reservation
// request. In general, most WebAssembly modules shouldn't need
// to set a maximum.
pub maximum: Option<u32>,
maximum: Option<u32>,
}
/// It holds the raw bytes of memory accessed by a WebAssembly Instance
@@ -45,11 +45,30 @@ impl LinearMemory {
);
// TODO: Investigate if memory is zeroed out
let mmap = MmapMut::map_anon(LinearMemory::DEFAULT_HEAP_SIZE).unwrap();
let base = unsafe {
mmap(
0 as _,
LinearMemory::DEFAULT_SIZE,
ProtFlags::PROT_NONE,
MapFlags::MAP_ANON | MapFlags::MAP_SHARED,
-1,
0,
).unwrap()
};
if initial > 0 {
assert_eq!(unsafe {
mprotect(
base,
(initial * PAGE_SIZE) as _,
PROT_READ | PROT_WRITE,
)
}, 0);
}
debug!("LinearMemory instantiated");
Self {
mmap,
base,
current: initial,
maximum,
}
@@ -57,11 +76,15 @@ impl LinearMemory {
/// Returns an base address of this linear memory.
pub fn base_addr(&mut self) -> *mut u8 {
self.mmap.as_mut_ptr()
self.base as _
}
/// Returns a number of allocated wasm pages.
pub fn current_size(&self) -> u32 {
pub fn current_size(&self) -> usize {
(self.current * PAGE_SIZE) as _
}
pub fn current_pages(&self) -> u32 {
self.current
}
@@ -76,7 +99,10 @@ impl LinearMemory {
/// of pages.
pub fn grow(&mut self, add_pages: u32) -> Option<i32> {
debug!("grow_memory called!");
debug!("old memory = {:?}", self.mmap);
if add_pages == 0 {
return Some(self.current as _);
}
let prev_pages = self.current;
let new_pages = match self.current.checked_add(add_pages) {
@@ -91,18 +117,23 @@ impl LinearMemory {
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
} else if new_pages >= 65536 {
} else if new_pages >= MAX_PAGES {
return None;
}
let prev_bytes = (prev_pages * PAGE_SIZE) as usize;
let new_bytes = (new_pages * PAGE_SIZE) as usize;
// Updating self.current if new_bytes > prev_bytes
if new_bytes > prev_bytes {
self.current = new_pages;
unsafe {
assert_eq!(mprotect(
self.base.add(prev_bytes),
new_bytes - prev_bytes,
PROT_READ | PROT_WRITE,
), 0);
}
self.current = new_pages;
Some(prev_pages as i32)
}
@@ -119,16 +150,6 @@ impl LinearMemory {
}
}
impl fmt::Debug for LinearMemory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("LinearMemory")
.field("mmap", &self.mmap)
.field("current", &self.current)
.field("maximum", &self.maximum)
.finish()
}
}
// Not comparing based on memory content. That would be inefficient.
impl PartialEq for LinearMemory {
fn eq(&self, other: &LinearMemory) -> bool {
@@ -136,42 +157,20 @@ impl PartialEq for LinearMemory {
}
}
impl AsRef<[u8]> for LinearMemory {
fn as_ref(&self) -> &[u8] {
&self.mmap
}
}
impl AsMut<[u8]> for LinearMemory {
fn as_mut(&mut self) -> &mut [u8] {
&mut self.mmap
}
}
impl Deref for LinearMemory {
type Target = [u8];
fn deref(&self) -> &[u8] {
&*self.mmap
unsafe {
slice::from_raw_parts(self.base as _, (self.current * PAGE_SIZE) as _)
}
}
}
impl DerefMut for LinearMemory {
fn deref_mut(&mut self) -> &mut [u8] {
&mut *self.mmap
unsafe {
slice::from_raw_parts_mut(self.base as _, (self.current * PAGE_SIZE) as _)
}
}
}
// impl Clone for LinearMemory {
// fn clone(&self) -> LinearMemory {
// let mut mmap = MmapMut::map_anon(self.maximum.unwrap_or(self.current) as usize).unwrap();
// let mut base_mmap = &self.mmap;
// let to_init = &mut mmap[0..self.current as usize];
// to_init.copy_from_slice(&self.mmap);
// return LinearMemory {
// mmap: mmap,
// current: self.current,
// maximum: self.maximum,
// };
// }
// }

View File

@@ -7,7 +7,7 @@ pub mod module;
pub mod relocation;
pub mod utils;
use cranelift_codegen::{isa, settings};
use cranelift_codegen::{isa, settings::{self, Configurable}};
use std::panic;
use std::str::FromStr;
use target_lexicon;
@@ -47,7 +47,16 @@ pub fn instantiate(
buffer_source: Vec<u8>,
import_object: ImportObject<&str, &str>,
) -> Result<ResultObject, ErrorKind> {
let flags = settings::Flags::new(settings::builder());
let flags = {
let mut builder = settings::builder();
builder.set("opt_level", "best")
.unwrap();
let flags = settings::Flags::new(builder);
debug_assert_eq!(flags.opt_level(), settings::OptLevel::Best);
flags
};
let isa = isa::lookup(triple!("x86_64")).unwrap().finish(flags);
let module = compile(buffer_source)?;

View File

@@ -401,9 +401,6 @@ impl<'environment> FuncEnvironmentTrait for FuncEnvironment<'environment> {
}
// TODO: offsets should be based on the architecture the wasmer was compiled for.
// e.g., BoundedSlice.len will be 32-bit (4 bytes) when wasmer is compiled for a 32-bit arch,
// however the 32-bit wasmer may be running on 64-bit arch, which means ptr_size here will
// be 8 bytes. That will definitely give the wrong offset values
fn make_heap(&mut self, func: &mut ir::Function, memory_index: MemoryIndex) -> ir::Heap {
debug_assert_eq!(
memory_index.index(),
@@ -423,8 +420,7 @@ impl<'environment> FuncEnvironmentTrait for FuncEnvironment<'environment> {
});
// Based on the index provided, we need to know the offset into memories array
// Each element in the memories array has a size of (ptr_size * 2)
let memory_data_offset = 0; // (memory_index as usize * ptr_size * 2) as i32;
let memory_data_offset = (memory_index.index() * ptr_size as usize) as i32;
// Load value at the (base + memory_data_offset)
// which is the address of data_pointer.memories[index].data
@@ -435,21 +431,14 @@ impl<'environment> FuncEnvironmentTrait for FuncEnvironment<'environment> {
readonly: true,
});
// Load value at the (base + memory_data_offset)
// which is the value of data_pointer.memories[index].len
let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
base,
offset: Offset32::new(memory_data_offset + ptr_size as i32),
global_type: I32,
readonly: false,
});
// Create table based on the data above
let heap = func.create_heap(ir::HeapData {
base: heap_base,
min_size: 0.into(),
guard_size: (LinearMemory::DEFAULT_GUARD_SIZE as i64).into(),
style: ir::HeapStyle::Dynamic { bound_gv },
style: ir::HeapStyle::Static {
bound: (LinearMemory::DEFAULT_HEAP_SIZE as i64).into(),
},
index_type: I32,
});

View File

@@ -22,13 +22,9 @@ pub fn print_instance_offsets(instance: &Instance) {
let memories_pointer_address_0 = memories_pointer_address_ptr_0 as usize;
let memories_pointer_address_ptr_0_data: *const usize =
unsafe { transmute(&data_ptr.memories.get_unchecked(0).data) };
unsafe { transmute(&data_ptr.memories.get_unchecked(0)) };
let memories_pointer_address_0_data = memories_pointer_address_ptr_0_data as usize;
let memories_pointer_address_ptr_0_len: *const usize =
unsafe { transmute(&data_ptr.memories.get_unchecked(0).len) };
let memories_pointer_address_0_len = memories_pointer_address_ptr_0_len as usize;
let globals_pointer_address_ptr: *const usize = unsafe { transmute(&data_ptr.globals) };
let globals_pointer_address = globals_pointer_address_ptr as usize;
@@ -40,7 +36,6 @@ instance.data_pointers.tables \t- {:X} | offset - {:?}
instance.data_pointers.memories\t- {:X} | offset - {:?}
.memories[0] \t\t- {:X} | offset - {:?}
.memories[0].data\t\t- {:X} | offset - {:?}
.memories[0].len({:?})\t- {:X} | offset - {:?}
instance.data_pointers.globals \t- {:X} | offset - {:?}
====== INSTANCE OFFSET TABLE ======
",
@@ -54,9 +49,6 @@ instance.data_pointers.globals \t- {:X} | offset - {:?}
0,
memories_pointer_address_0_data,
memories_pointer_address_0_data - memories_pointer_address_0_data,
data_ptr.memories.get_unchecked(0).len,
memories_pointer_address_0_len,
memories_pointer_address_0_len - memories_pointer_address_0_data,
globals_pointer_address,
globals_pointer_address - instance_address,
);