mirror of https://github.com/fluencelabs/wasmer
synced 2024-12-12 22:05:33 +00:00

Full preemptive snapshot/resume.

This commit is contained in:
parent 7d0b70bddf
commit 967027003d
@@ -164,6 +164,7 @@ impl Intrinsics {
    let memory_base_ty = i8_ty;
    let memory_bound_ty = void_ty;
    let internals_ty = i64_ty;
+   let interrupt_signal_mem_ty = i8_ty;
    let local_function_ty = i8_ptr_ty;

    let anyfunc_ty = context.struct_type(

@@ -222,6 +223,9 @@ impl Intrinsics {
    internals_ty
        .ptr_type(AddressSpace::Generic)
        .as_basic_type_enum(),
+   interrupt_signal_mem_ty
+       .ptr_type(AddressSpace::Generic)
+       .as_basic_type_enum(),
    local_function_ty
        .ptr_type(AddressSpace::Generic)
        .as_basic_type_enum(),
@@ -17,6 +17,7 @@ impl FunctionMiddleware for CallTrace {
    Event::Internal(InternalEvent::FunctionBegin(id)) => sink.push(Event::Internal(
        InternalEvent::Breakpoint(Box::new(move |_| {
            eprintln!("func ({})", id);
+           Ok(())
        })),
    )),
    _ => {}
@@ -94,11 +94,9 @@ impl FunctionMiddleware for Metering {
    sink.push(Event::WasmOwned(Operator::If {
        ty: WpType::EmptyBlockType,
    }));
-   sink.push(Event::Internal(InternalEvent::Breakpoint(Box::new(
-       move |ctx| unsafe {
-           (ctx.throw)(Box::new(ExecutionLimitExceededError));
-       },
-   ))));
+   sink.push(Event::Internal(InternalEvent::Breakpoint(Box::new(|_| {
+       Err(Box::new(ExecutionLimitExceededError))
+   }))));
    sink.push(Event::WasmOwned(Operator::End));
    }
    _ => {}
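
Note: both middleware hunks above follow from the new breakpoint contract introduced in codegen.rs (see the BkptHandler hunks further down): a breakpoint callback now returns Result<(), Box<dyn Any>> instead of receiving a throw function through BkptInfo. A minimal sketch of the new shape, with stand-in types rather than the real wasmer-runtime-core definitions:

    use std::any::Any;

    // Stand-ins: BkptInfo is reduced to a unit struct here.
    struct BkptInfo;
    type BkptHandler =
        Box<dyn Fn(BkptInfo) -> Result<(), Box<dyn Any>> + Send + Sync + 'static>;

    #[derive(Debug)]
    struct ExecutionLimitExceededError;

    fn main() {
        // Returning Err is how a handler requests an unsafe unwind now;
        // Ok(()) means "resume execution after the breakpoint".
        let handler: BkptHandler =
            Box::new(|_| Err(Box::new(ExecutionLimitExceededError)));
        match handler(BkptInfo) {
            Ok(()) => println!("continue"),
            Err(e) => println!(
                "unwind with payload: {:?}",
                e.downcast_ref::<ExecutionLimitExceededError>()
            ),
        }
    }
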
@@ -16,8 +16,42 @@ leaq run_on_alternative_stack.returning(%rip), %rax
movq %rax, -24(%rdi)

movq %rsi, %rsp

+movq (%rsp), %xmm0
+add $8, %rsp
+
+movq (%rsp), %xmm1
+add $8, %rsp
+
+movq (%rsp), %xmm2
+add $8, %rsp
+
+movq (%rsp), %xmm3
+add $8, %rsp
+
+movq (%rsp), %xmm4
+add $8, %rsp
+
+movq (%rsp), %xmm5
+add $8, %rsp
+
+movq (%rsp), %xmm6
+add $8, %rsp
+
+movq (%rsp), %xmm7
+add $8, %rsp
+
+popq %rbp
+popq %rax
+popq %rbx
+popq %rcx
+popq %rdx
+popq %rdi
+popq %rsi
+popq %r8
+popq %r9
+popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
@@ -16,8 +16,42 @@ leaq _run_on_alternative_stack.returning(%rip), %rax
movq %rax, -24(%rdi)

movq %rsi, %rsp

+movq (%rsp), %xmm0
+add $8, %rsp
+
+movq (%rsp), %xmm1
+add $8, %rsp
+
+movq (%rsp), %xmm2
+add $8, %rsp
+
+movq (%rsp), %xmm3
+add $8, %rsp
+
+movq (%rsp), %xmm4
+add $8, %rsp
+
+movq (%rsp), %xmm5
+add $8, %rsp
+
+movq (%rsp), %xmm6
+add $8, %rsp
+
+movq (%rsp), %xmm7
+add $8, %rsp
+
+popq %rbp
+popq %rax
+popq %rbx
+popq %rcx
+popq %rdx
+popq %rdi
+popq %rsi
+popq %r8
+popq %r9
+popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
@@ -2,49 +2,103 @@ mod raw {
    use std::ffi::c_void;

    extern "C" {
-       pub fn run_on_alternative_stack(
-           stack_end: *mut u64,
-           stack_begin: *mut u64,
-           userdata_arg2: *mut u8,
-       ) -> u64;
+       pub fn run_on_alternative_stack(stack_end: *mut u64, stack_begin: *mut u64) -> u64;
        pub fn setjmp(env: *mut c_void) -> i32;
        pub fn longjmp(env: *mut c_void, val: i32) -> !;
    }
}

-use crate::state::x64::{read_stack, X64Register, GPR};
-use crate::suspend;
+use crate::codegen::{BkptInfo, BkptMap};
+use crate::state::x64::{build_instance_image, read_stack, X64Register, GPR, XMM};
use crate::vm;
-use libc::siginfo_t;
+use libc::{mmap, mprotect, siginfo_t, MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use nix::sys::signal::{
-   sigaction, SaFlags, SigAction, SigHandler, SigSet, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGSEGV,
-   SIGTRAP,
+   sigaction, SaFlags, SigAction, SigHandler, SigSet, Signal, SIGBUS, SIGFPE, SIGILL, SIGINT,
+   SIGSEGV, SIGTRAP,
};
use std::any::Any;
use std::cell::UnsafeCell;
use std::ffi::c_void;
use std::process;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Once;

pub(crate) unsafe fn run_on_alternative_stack(stack_end: *mut u64, stack_begin: *mut u64) -> u64 {
-   raw::run_on_alternative_stack(stack_end, stack_begin, ::std::ptr::null_mut())
+   raw::run_on_alternative_stack(stack_end, stack_begin)
}

const SETJMP_BUFFER_LEN: usize = 27;
type SetJmpBuffer = [i32; SETJMP_BUFFER_LEN];

-thread_local! {
-   static UNWIND: UnsafeCell<Option<(SetJmpBuffer, Option<Box<Any>>)>> = UnsafeCell::new(None);
+struct UnwindInfo {
+   jmpbuf: SetJmpBuffer, // in
+   breakpoints: Option<BkptMap>,
+   payload: Option<Box<Any>>, // out
}

-pub unsafe fn catch_unsafe_unwind<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<Any>> {
+thread_local! {
+   static UNWIND: UnsafeCell<Option<UnwindInfo>> = UnsafeCell::new(None);
+}
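
Note: the (jmpbuf, payload) tuple becomes an UnwindInfo struct that additionally carries the breakpoint map, so the signal handler can reach the handlers through the same thread-local. The save/old/restore discipline is unchanged; here is a sketch of that discipline using std::panic::catch_unwind in place of raw setjmp/longjmp (an analogy, not the actual mechanism):

    use std::any::Any;
    use std::cell::UnsafeCell;
    use std::panic::{self, AssertUnwindSafe};

    struct UnwindInfo {
        payload: Option<Box<dyn Any + Send>>, // out
    }

    thread_local! {
        static UNWIND: UnsafeCell<Option<UnwindInfo>> = UnsafeCell::new(None);
    }

    fn catch<R>(f: impl FnOnce() -> R) -> Result<R, Box<dyn Any + Send>> {
        UNWIND.with(|u| unsafe {
            let slot = u.get();
            let old = (*slot).take(); // support nesting, like the real code
            *slot = Some(UnwindInfo { payload: None });
            let ret = panic::catch_unwind(AssertUnwindSafe(f));
            let payload = (*slot).as_mut().and_then(|i| i.payload.take());
            *slot = old; // restore the outer scope
            ret.map_err(|e| payload.unwrap_or(e))
        })
    }

    fn throw(e: Box<dyn Any + Send>) -> ! {
        UNWIND.with(|u| unsafe {
            (*u.get())
                .as_mut()
                .expect("not within a catch scope")
                .payload = Some(e);
        });
        panic::panic_any(()) // stands in for longjmp to the saved jmpbuf
    }

    fn main() {
        panic::set_hook(Box::new(|_| {})); // silence the default panic hook
        let r: Result<(), _> = catch(|| throw(Box::new("suspended")));
        assert_eq!(*r.unwrap_err().downcast::<&str>().unwrap(), "suspended");
    }
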
+struct InterruptSignalMem(*mut u8);
+unsafe impl Send for InterruptSignalMem {}
+unsafe impl Sync for InterruptSignalMem {}
+
+const INTERRUPT_SIGNAL_MEM_SIZE: usize = 4096;
+
+lazy_static! {
+   static ref INTERRUPT_SIGNAL_MEM: InterruptSignalMem = {
+       let ptr = unsafe {
+           mmap(
+               ::std::ptr::null_mut(),
+               INTERRUPT_SIGNAL_MEM_SIZE,
+               PROT_READ | PROT_WRITE,
+               MAP_PRIVATE | MAP_ANON,
+               -1,
+               0,
+           )
+       };
+       if ptr as isize == -1 {
+           panic!("cannot allocate code memory");
+       }
+       InterruptSignalMem(ptr as _)
+   };
+}
+static INTERRUPT_SIGNAL_DELIVERED: AtomicBool = AtomicBool::new(false);
+
+pub unsafe fn get_wasm_interrupt_signal_mem() -> *mut u8 {
+   INTERRUPT_SIGNAL_MEM.0
+}
+
+pub unsafe fn set_wasm_interrupt() {
+   let mem: *mut u8 = INTERRUPT_SIGNAL_MEM.0;
+   if mprotect(mem as _, INTERRUPT_SIGNAL_MEM_SIZE, PROT_NONE) < 0 {
+       panic!("cannot set PROT_NONE on signal mem");
+   }
+}
+
+pub unsafe fn clear_wasm_interrupt() {
+   let mem: *mut u8 = INTERRUPT_SIGNAL_MEM.0;
+   if mprotect(mem as _, INTERRUPT_SIGNAL_MEM_SIZE, PROT_READ | PROT_WRITE) < 0 {
+       panic!("cannot set PROT_READ | PROT_WRITE on signal mem");
+   }
+}
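
Note: this page is the heart of preemptive suspension. Generated code loads from it at every function header and loop backedge; set_wasm_interrupt() flips it to PROT_NONE so the next such load faults, and the faulting address identifies the suspend request. A runnable sketch of the allocation and the protection flip, assuming a Unix target and the libc crate (the faulting read itself is omitted, since surviving it requires the signal handler):

    use libc::{
        mmap, mprotect, MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE,
    };

    const PAGE: usize = 4096;

    fn main() {
        unsafe {
            let mem = mmap(
                std::ptr::null_mut(),
                PAGE,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON,
                -1,
                0,
            );
            assert!(mem != MAP_FAILED, "cannot allocate signal page");

            // Normal state: reads succeed, so the emitted check is branch-free.
            let _ = std::ptr::read_volatile(mem as *const u8);

            // Arm the interrupt: the next read from generated code traps.
            assert_eq!(mprotect(mem, PAGE, PROT_NONE), 0);

            // The SIGSEGV handler clears the request before snapshotting.
            assert_eq!(mprotect(mem, PAGE, PROT_READ | PROT_WRITE), 0);
        }
    }
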
+pub unsafe fn catch_unsafe_unwind<R, F: FnOnce() -> R>(
+   f: F,
+   breakpoints: Option<BkptMap>,
+) -> Result<R, Box<Any>> {
    let unwind = UNWIND.with(|x| x.get());
    let old = (*unwind).take();
-   *unwind = Some(([0; SETJMP_BUFFER_LEN], None));
+   *unwind = Some(UnwindInfo {
+       jmpbuf: [0; SETJMP_BUFFER_LEN],
+       breakpoints: breakpoints,
+       payload: None,
+   });

-   if raw::setjmp(&mut (*unwind).as_mut().unwrap().0 as *mut SetJmpBuffer as *mut _) != 0 {
+   if raw::setjmp(&mut (*unwind).as_mut().unwrap().jmpbuf as *mut SetJmpBuffer as *mut _) != 0 {
        // error
-       let ret = (*unwind).as_mut().unwrap().1.take().unwrap();
+       let ret = (*unwind).as_mut().unwrap().payload.take().unwrap();
        *unwind = old;
        Err(ret)
    } else {
@@ -60,8 +114,16 @@ pub unsafe fn begin_unsafe_unwind(e: Box<Any>) -> ! {
    let inner = (*unwind)
        .as_mut()
        .expect("not within a catch_unsafe_unwind scope");
-   inner.1 = Some(e);
-   raw::longjmp(&mut inner.0 as *mut SetJmpBuffer as *mut _, 0xffff);
+   inner.payload = Some(e);
+   raw::longjmp(&mut inner.jmpbuf as *mut SetJmpBuffer as *mut _, 0xffff);
}

+unsafe fn with_breakpoint_map<R, F: FnOnce(Option<&BkptMap>) -> R>(f: F) -> R {
+   let unwind = UNWIND.with(|x| x.get());
+   let inner = (*unwind)
+       .as_mut()
+       .expect("not within a catch_unsafe_unwind scope");
+   f(inner.breakpoints.as_ref())
+}

pub fn allocate_and_run<R, F: FnOnce() -> R>(size: usize, f: F) -> R {
@@ -70,7 +132,7 @@ pub fn allocate_and_run<R, F: FnOnce() -> R>(size: usize, f: F) -> R {
    ret: Option<R>,
}

-extern "C" fn invoke<F: FnOnce() -> R, R>(_: u64, _: u64, ctx: &mut Context<F, R>) {
+extern "C" fn invoke<F: FnOnce() -> R, R>(ctx: &mut Context<F, R>) {
    let f = ctx.f.take().unwrap();
    ctx.ret = Some(f());
}
@@ -89,29 +151,65 @@ pub fn allocate_and_run<R, F: FnOnce() -> R>(size: usize, f: F) -> R {
    stack[end_offset - 4] = invoke::<F, R> as usize as u64;

+   // NOTE: Keep this consistent with `image-loading-*.s`.
-   let stack_begin = stack.as_mut_ptr().offset((end_offset - 4 - 6) as isize);
-   stack[end_offset - 4 - 10] = &mut ctx as *mut Context<F, R> as usize as u64; // rdi
+   const NUM_SAVED_REGISTERS: usize = 23;
+   let stack_begin = stack
+       .as_mut_ptr()
+       .offset((end_offset - 4 - NUM_SAVED_REGISTERS) as isize);
    let stack_end = stack.as_mut_ptr().offset(end_offset as isize);

-   raw::run_on_alternative_stack(
-       stack_end,
-       stack_begin,
-       &mut ctx as *mut Context<F, R> as *mut u8,
-   );
+   raw::run_on_alternative_stack(stack_end, stack_begin);
    ctx.ret.take().unwrap()
}
}
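
Note: NUM_SAVED_REGISTERS = 23 must agree with the pop sequence in the image-loading assembly hunks above: 8 XMM slots plus 15 general-purpose slots. The ordering below is our reading of the diff (r15 carries vmctx and appears to be restored outside the shown context), so treat it as an annotated guess rather than something the commit states:

    // Top of the register frame first, in the order the assembly pops it.
    const SAVED_REGISTERS: [&str; 23] = [
        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
        "rbp", "rax", "rbx", "rcx", "rdx", "rdi", "rsi",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    ];

    fn main() {
        assert_eq!(SAVED_REGISTERS.len(), 23); // NUM_SAVED_REGISTERS
    }
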
extern "C" fn signal_trap_handler(
-   _signum: ::nix::libc::c_int,
+   signum: ::nix::libc::c_int,
    siginfo: *mut siginfo_t,
    ucontext: *mut c_void,
) {
    unsafe {
        let fault = get_fault_info(siginfo as _, ucontext);

-       allocate_and_run(65536, || {
+       let mut unwind_result: Box<dyn Any> = Box::new(());
+
+       let should_unwind = allocate_and_run(1048576, || {
+           let mut is_suspend_signal = false;
+
+           match Signal::from_c_int(signum) {
+               Ok(SIGTRAP) => {
+                   // breakpoint
+                   let out: Option<Result<(), Box<dyn Any>>> = with_breakpoint_map(|bkpt_map| {
+                       bkpt_map.and_then(|x| x.get(&(fault.ip as usize))).map(|x| {
+                           x(BkptInfo {
+                               fault: Some(&fault),
+                           })
+                       })
+                   });
+                   match out {
+                       Some(Ok(())) => {
+                           return false;
+                       }
+                       Some(Err(e)) => {
+                           unwind_result = e;
+                           return true;
+                       }
+                       None => {}
+                   }
+               }
+               Ok(SIGSEGV) | Ok(SIGBUS) => {
+                   println!("SIGSEGV/SIGBUS on addr {:?}", fault.faulting_addr);
+                   if fault.faulting_addr as usize == get_wasm_interrupt_signal_mem() as usize {
+                       is_suspend_signal = true;
+                       clear_wasm_interrupt();
+                       INTERRUPT_SIGNAL_DELIVERED.store(false, Ordering::SeqCst);
+                   }
+               }
+               _ => {}
+           }

            // TODO: make this safer
-           let ctx = &*(fault.known_registers[X64Register::GPR(GPR::R15).to_index().0].unwrap()
+           let ctx = &mut *(fault.known_registers[X64Register::GPR(GPR::R15).to_index().0].unwrap()
                as *mut vm::Ctx);
            let rsp = fault.known_registers[X64Register::GPR(GPR::RSP).to_index().0].unwrap();
@@ -120,7 +218,7 @@ extern "C" fn signal_trap_handler(
        .get_module_state_map()
        .unwrap();
    let code_base = (*ctx.module).runnable_module.get_code().unwrap().as_ptr() as usize;
-   let image = read_stack(
+   let es_image = read_stack(
        &msm,
        code_base,
        rsp as usize as *const u64,
@@ -128,17 +226,26 @@ extern "C" fn signal_trap_handler(
        Some(fault.ip as usize as u64),
    );

-   use colored::*;
-   eprintln!(
-       "\n{}",
-       "Wasmer encountered an error while running your WebAssembly program."
-           .bold()
-           .red()
-   );
-   image.print_backtrace_if_needed();
+   if is_suspend_signal {
+       let image = build_instance_image(ctx, es_image);
+       unwind_result = Box::new(image);
+   } else {
+       use colored::*;
+       eprintln!(
+           "\n{}",
+           "Wasmer encountered an error while running your WebAssembly program."
+               .bold()
+               .red()
+       );
+       es_image.print_backtrace_if_needed();
+   }

+   true
    });

-   begin_unsafe_unwind(Box::new(()));
+   if should_unwind {
+       begin_unsafe_unwind(unwind_result);
+   }
    }
}
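
Note: distilled decision logic of the patched handler (a simplification: signal numbers are Linux values here, and the real handler also captures the faulting register file and builds the instance image on its oversized alternative stack):

    #[derive(Debug, PartialEq)]
    enum Outcome {
        Resume,  // breakpoint handler returned Ok(())
        Suspend, // fault on the interrupt page -> build InstanceImage
        Fatal,   // genuine trap or breakpoint Err -> unwind with the payload
    }

    fn classify(signum: i32, faulting_addr: usize, signal_mem: usize, bkpt_ok: Option<bool>) -> Outcome {
        const SIGTRAP: i32 = 5;
        const SIGBUS: i32 = 7; // Linux value; differs on macOS
        const SIGSEGV: i32 = 11;
        match signum {
            SIGTRAP => match bkpt_ok {
                Some(true) => Outcome::Resume,
                _ => Outcome::Fatal,
            },
            SIGSEGV | SIGBUS if faulting_addr == signal_mem => Outcome::Suspend,
            _ => Outcome::Fatal,
        }
    }

    fn main() {
        assert_eq!(classify(11, 0x7000, 0x7000, None), Outcome::Suspend);
        assert_eq!(classify(11, 0xdead, 0x7000, None), Outcome::Fatal);
    }
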
@@ -147,14 +254,13 @@ extern "C" fn sigint_handler(
    _siginfo: *mut siginfo_t,
    _ucontext: *mut c_void,
) {
-   if suspend::get_interrupted() {
-       eprintln!(
-           "Got another SIGINT before interrupt is handled by WebAssembly program, aborting"
-       );
+   if INTERRUPT_SIGNAL_DELIVERED.swap(true, Ordering::SeqCst) {
+       eprintln!("Got another SIGINT before trap is triggered on WebAssembly side, aborting");
        process::abort();
    }
-   suspend::set_interrupted(true);
    eprintln!("Notified WebAssembly program to exit");
+   unsafe {
+       set_wasm_interrupt();
+   }
}

pub fn ensure_sighandler() {
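
Note: the SIGINT handler now communicates with the runtime purely through the interrupt page; the atomic swap doubles as the double-Ctrl-C guard:

    use std::sync::atomic::{AtomicBool, Ordering};

    static INTERRUPT_SIGNAL_DELIVERED: AtomicBool = AtomicBool::new(false);

    // Returns true if a previous SIGINT is still pending, i.e. the caller
    // should abort instead of re-arming the interrupt page.
    fn sigint_already_pending() -> bool {
        INTERRUPT_SIGNAL_DELIVERED.swap(true, Ordering::SeqCst)
    }

    fn main() {
        assert!(!sigint_already_pending()); // first Ctrl-C: arm the page
        assert!(sigint_already_pending());  // second before the trap fires: abort
    }
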
@@ -285,12 +391,17 @@ pub unsafe fn get_fault_info(siginfo: *const c_void, ucontext: *const c_void) ->
    fs: u64,
    gs: u64,
}
+#[repr(C)]
+struct fpstate {
+   _unused: [u8; 168],
+   xmm: [[u64; 2]; 8],
+}
#[allow(dead_code)]
#[repr(C)]
struct mcontext_t {
    es: exception_state,
    ss: regs,
-   // ...
+   fs: fpstate,
}

let siginfo = siginfo as *const siginfo_t;

@@ -298,6 +409,7 @@ pub unsafe fn get_fault_info(siginfo: *const c_void, ucontext: *const c_void) ->

let ucontext = ucontext as *const ucontext_t;
let ss = &(*(*ucontext).uc_mcontext).ss;
+let fs = &(*(*ucontext).uc_mcontext).fs;

let mut known_registers: [Option<u64>; 24] = [None; 24];
@@ -319,7 +431,14 @@ pub unsafe fn get_fault_info(siginfo: *const c_void, ucontext: *const c_void) ->
known_registers[X64Register::GPR(GPR::RBP).to_index().0] = Some(ss.rbp);
known_registers[X64Register::GPR(GPR::RSP).to_index().0] = Some(ss.rsp);

-// TODO: XMM registers
+known_registers[X64Register::XMM(XMM::XMM0).to_index().0] = Some(fs.xmm[0][0]);
+known_registers[X64Register::XMM(XMM::XMM1).to_index().0] = Some(fs.xmm[1][0]);
+known_registers[X64Register::XMM(XMM::XMM2).to_index().0] = Some(fs.xmm[2][0]);
+known_registers[X64Register::XMM(XMM::XMM3).to_index().0] = Some(fs.xmm[3][0]);
+known_registers[X64Register::XMM(XMM::XMM4).to_index().0] = Some(fs.xmm[4][0]);
+known_registers[X64Register::XMM(XMM::XMM5).to_index().0] = Some(fs.xmm[5][0]);
+known_registers[X64Register::XMM(XMM::XMM6).to_index().0] = Some(fs.xmm[6][0]);
+known_registers[X64Register::XMM(XMM::XMM7).to_index().0] = Some(fs.xmm[7][0]);

FaultInfo {
    faulting_addr: si_addr,
@@ -9,6 +9,7 @@ use crate::{

use crate::{
    cache::{Artifact, Error as CacheError},
+   codegen::BkptMap,
    module::ModuleInfo,
    sys::Memory,
};

@@ -89,6 +90,10 @@ pub trait RunnableModule: Send + Sync {
    None
}

+fn get_breakpoints(&self) -> Option<BkptMap> {
+   None
+}
+
/// A wasm trampoline contains the necessary data to dynamically call an exported wasm function.
/// Given a particular signature index, we are returned a trampoline that is matched with that
/// signature and an invoke function that can call the trampoline.
@@ -9,6 +9,7 @@ use crate::{
};
use smallvec::SmallVec;
use std::any::Any;
+use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::marker::PhantomData;

@@ -16,6 +17,9 @@ use std::sync::{Arc, RwLock};
use wasmparser::{self, WasmDecoder};
use wasmparser::{Operator, Type as WpType};

+pub type BkptHandler = Box<Fn(BkptInfo) -> Result<(), Box<dyn Any>> + Send + Sync + 'static>;
+pub type BkptMap = Arc<HashMap<usize, BkptHandler>>;
+
#[derive(Debug)]
pub enum Event<'a, 'b> {
    Internal(InternalEvent),

@@ -26,7 +30,7 @@ pub enum Event<'a, 'b> {
pub enum InternalEvent {
    FunctionBegin(u32),
    FunctionEnd,
-   Breakpoint(Box<Fn(BkptInfo) + Send + Sync + 'static>),
+   Breakpoint(BkptHandler),
    SetInternal(u32),
    GetInternal(u32),
}

@@ -43,8 +47,8 @@ impl fmt::Debug for InternalEvent {
    }
}

-pub struct BkptInfo {
-   pub throw: unsafe fn(Box<dyn Any>) -> !,
+pub struct BkptInfo<'a> {
+   pub fault: Option<&'a dyn Any>,
}

pub trait ModuleCodeGenerator<FCG: FunctionCodeGenerator<E>, RM: RunnableModule, E: Debug> {
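
Note: a sketch of how a BkptMap is built and consumed under the new types (BkptInfo is replaced by a bare instruction offset here for brevity; the real handler receives the fault context):

    use std::any::Any;
    use std::collections::HashMap;
    use std::sync::Arc;

    type BkptHandler = Box<dyn Fn(usize) -> Result<(), Box<dyn Any>> + Send + Sync + 'static>;
    type BkptMap = Arc<HashMap<usize, BkptHandler>>;

    fn main() {
        let mut m: HashMap<usize, BkptHandler> = HashMap::new();
        m.insert(0x40, Box::new(|ip| {
            println!("breakpoint at {:#x}", ip);
            Ok(()) // resume; Err(payload) would unwind instead
        }));
        let map: BkptMap = Arc::new(m);

        // Signal-handler side: look up the faulting ip and run the handler.
        if let Some(h) = map.get(&0x40) {
            h(0x40).unwrap();
        }
    }
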
@@ -46,8 +46,6 @@ pub use trampoline_x64 as trampoline;
#[cfg(all(unix, target_arch = "x86_64"))]
pub mod alternative_stack;
pub mod state;
-#[cfg(all(unix, target_arch = "x86_64"))]
-pub mod suspend;

use self::error::CompileResult;
#[doc(inline)]
@@ -53,10 +53,17 @@ pub struct FunctionStateMap {
    pub locals: Vec<WasmAbstractValue>,
    pub shadow_size: usize, // for single-pass backend, 32 bytes on x86-64
    pub diffs: Vec<MachineStateDiff>,
+   pub wasm_function_header_target_offset: Option<usize>,
    pub wasm_offset_to_target_offset: Vec<usize>,
-   pub loop_offsets: BTreeMap<usize, usize>, /* offset -> diff_id */
-   pub call_offsets: BTreeMap<usize, usize>, /* offset -> diff_id */
-   pub trappable_offsets: BTreeMap<usize, usize>, /* offset -> diff_id */
+   pub loop_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
+   pub call_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
+   pub trappable_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
}

+#[derive(Clone, Debug)]
+pub struct OffsetInfo {
+   pub diff_id: usize,
+   pub activate_offset: usize,
+}
+
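
Note: the key of each offsets map is where a suspension can be observed (an after-call return address, a trappable instruction, or the middle of a loop's interrupt check), while activate_offset is where execution must re-enter when resuming from an image; diff_id selects the MachineStateDiff describing stack and registers at that point. An interpretation in miniature, with made-up offsets:

    use std::collections::BTreeMap;

    #[derive(Clone, Debug)]
    pub struct OffsetInfo {
        pub diff_id: usize,
        pub activate_offset: usize,
    }

    fn main() {
        let mut loop_offsets: BTreeMap<usize, OffsetInfo> = BTreeMap::new();
        // Interrupt check starting at 0x118, registered mid-check at 0x120.
        loop_offsets.insert(0x120, OffsetInfo { diff_id: 7, activate_offset: 0x118 });
        let info = &loop_offsets[&0x120];
        println!("resume at {:#x} using diff {}", info.activate_offset, info.diff_id);
    }
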
#[derive(Clone, Debug)]

@@ -98,7 +105,7 @@ impl ModuleStateMap {
        .unwrap();

    match fsm.call_offsets.get(&(ip - base)) {
-       Some(x) => Some((fsm, fsm.diffs[*x].build_state(fsm))),
+       Some(x) => Some((fsm, fsm.diffs[x.diff_id].build_state(fsm))),
        None => None,
    }
}

@@ -120,7 +127,25 @@ impl ModuleStateMap {
        .unwrap();

    match fsm.trappable_offsets.get(&(ip - base)) {
-       Some(x) => Some((fsm, fsm.diffs[*x].build_state(fsm))),
+       Some(x) => Some((fsm, fsm.diffs[x.diff_id].build_state(fsm))),
        None => None,
    }
    }
}

+fn lookup_loop_ip(&self, ip: usize, base: usize) -> Option<(&FunctionStateMap, MachineState)> {
+   if ip < base || ip - base >= self.total_size {
+       None
+   } else {
+       //println!("lookup ip: {} in {:?}", ip - base, self.local_functions);
+       let (_, fsm) = self
+           .local_functions
+           .range((Unbounded, Included(&(ip - base))))
+           .last()
+           .unwrap();
+
+       match fsm.loop_offsets.get(&(ip - base)) {
+           Some(x) => Some((fsm, fsm.diffs[x.diff_id].build_state(fsm))),
+           None => None,
+       }
+   }
@@ -140,6 +165,7 @@ impl FunctionStateMap {
    shadow_size,
    locals,
    diffs: vec![],
+   wasm_function_header_target_offset: None,
    wasm_offset_to_target_offset: Vec::new(),
    loop_offsets: BTreeMap::new(),
    call_offsets: BTreeMap::new(),
@@ -330,6 +356,7 @@ impl InstanceImage {
pub mod x64 {
    use super::*;
    use crate::alternative_stack::{catch_unsafe_unwind, run_on_alternative_stack};
+   use crate::codegen::BkptMap;
    use crate::structures::TypedIndex;
    use crate::types::LocalGlobalIndex;
    use crate::vm::Ctx;

@@ -352,6 +379,7 @@ pub mod x64 {
    code_base: usize,
    image: InstanceImage,
    vmctx: &mut Ctx,
+   breakpoints: Option<BkptMap>,
) -> Result<u64, Box<dyn Any>> {
    let mut stack: Vec<u64> = vec![0; 1048576 * 8 / 8]; // 8MB stack
    let mut stack_offset: usize = stack.len();
@@ -368,15 +396,31 @@ pub mod x64 {
    // Bottom to top
    for f in image.execution_state.frames.iter().rev() {
        let fsm = local_functions_vec[f.local_function_id];
-       let call_begin_offset = fsm.wasm_offset_to_target_offset[f.wasm_inst_offset];
+       let begin_offset = if f.wasm_inst_offset == ::std::usize::MAX {
+           fsm.wasm_function_header_target_offset.unwrap()
+       } else {
+           fsm.wasm_offset_to_target_offset[f.wasm_inst_offset]
+       };

-       // Left bound must be Excluded because it's possible that the previous instruction's (after-)call offset == call_begin_offset.
-       let (after_call_inst, diff_id) = fsm
-           .call_offsets
-           .range((Excluded(&call_begin_offset), Unbounded))
-           .next()
-           .map(|(k, v)| (*k, *v))
-           .expect("instruction offset not found in call offsets");
+       let (target_inst_offset, diff_id) = fsm
+           .loop_offsets
+           .get(&begin_offset)
+           .map(|v| (v.activate_offset, v.diff_id))
+           .or_else(|| {
+               fsm.trappable_offsets
+                   .get(&begin_offset)
+                   .map(|v| (v.activate_offset, v.diff_id))
+           })
+           .or_else(|| {
+               // Left bound must be Excluded because it's possible that the previous instruction's (after-)call offset == call_begin_offset.
+               // This might not be the correct offset if begin_offset itself does not correspond to a call(_indirect) instruction,
+               // but anyway safety isn't broken because diff_id always corresponds to target_inst_offset.
+               fsm.call_offsets
+                   .range((Excluded(&begin_offset), Unbounded))
+                   .next()
+                   .map(|(_, v)| (v.activate_offset, v.diff_id))
+           })
+           .expect("instruction offset not found in any offset type");

        let diff = &fsm.diffs[diff_id];
        let state = diff.build_state(fsm);
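
Note: the lookup above resolves a frame's re-entry point by trying loop_offsets, then trappable_offsets, then the first call_offsets entry strictly after begin_offset (exclusive, because the previous instruction's after-call offset may equal begin_offset). The same chain as a standalone sketch:

    use std::collections::BTreeMap;
    use std::ops::Bound::{Excluded, Unbounded};

    #[derive(Clone)]
    struct OffsetInfo { diff_id: usize, activate_offset: usize }

    fn resolve(
        begin: usize,
        loops: &BTreeMap<usize, OffsetInfo>,
        traps: &BTreeMap<usize, OffsetInfo>,
        calls: &BTreeMap<usize, OffsetInfo>,
    ) -> Option<(usize, usize)> {
        loops.get(&begin)
            .or_else(|| traps.get(&begin))
            .map(|v| (v.activate_offset, v.diff_id))
            .or_else(|| {
                calls.range((Excluded(&begin), Unbounded))
                    .next()
                    .map(|(_, v)| (v.activate_offset, v.diff_id))
            })
    }

    fn main() {
        let mut calls = BTreeMap::new();
        calls.insert(0x30, OffsetInfo { diff_id: 2, activate_offset: 0x30 });
        let empty = BTreeMap::new();
        assert_eq!(resolve(0x2c, &empty, &empty, &calls), Some((0x30, 2)));
    }
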
@@ -434,7 +478,10 @@ pub mod x64 {
            }
        }
    }
-   assert!(got_explicit_shadow);
+   if !got_explicit_shadow {
+       assert!(fsm.shadow_size % 8 == 0);
+       stack_offset -= fsm.shadow_size / 8;
+   }
    for (i, v) in state.register_values.iter().enumerate() {
        match *v {
            MachineValue::Undefined => {}

@@ -460,9 +507,11 @@ pub mod x64 {
            _ => unreachable!(),
        }
    }
-   assert!((stack.len() - stack_offset) % 2 == 0); // 16-byte alignment
+
+   // no need to check 16-byte alignment here because it's possible that we're not at a call entry.

    stack_offset -= 1;
-   stack[stack_offset] = (code_base + after_call_inst) as u64; // return address
+   stack[stack_offset] = (code_base + target_inst_offset) as u64; // return address
    }

    stack_offset -= 1;
@@ -477,12 +526,71 @@ pub mod x64 {
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R12).to_index().0].unwrap_or(0);

+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::R11).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::R10).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::R9).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::R8).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::RSI).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::RDI).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::RDX).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::RCX).to_index().0].unwrap_or(0);
+
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RBX).to_index().0].unwrap_or(0);

+   stack_offset -= 1;
+   stack[stack_offset] = known_registers[X64Register::GPR(GPR::RAX).to_index().0].unwrap_or(0);
+
    stack_offset -= 1;
    stack[stack_offset] = stack.as_ptr().offset(last_stack_offset as isize) as usize as u64; // rbp

+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM7).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM6).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM5).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM4).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM3).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM2).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM1).to_index().0].unwrap_or(0);
+
+   stack_offset -= 1;
+   stack[stack_offset] =
+       known_registers[X64Register::XMM(XMM::XMM0).to_index().0].unwrap_or(0);
+
    if let Some(ref memory) = image.memory {
        assert!(vmctx.internal.memory_bound <= memory.len());
@@ -512,12 +620,15 @@ pub mod x64 {

    drop(image); // free up host memory

-   catch_unsafe_unwind(|| {
-       run_on_alternative_stack(
-           stack.as_mut_ptr().offset(stack.len() as isize),
-           stack.as_mut_ptr().offset(stack_offset as isize),
-       )
-   })
+   catch_unsafe_unwind(
+       || {
+           run_on_alternative_stack(
+               stack.as_mut_ptr().offset(stack.len() as isize),
+               stack.as_mut_ptr().offset(stack_offset as isize),
+           )
+       },
+       breakpoints,
+   )
}

pub fn build_instance_image(

@@ -575,6 +686,7 @@ pub mod x64 {
    let (fsm, state) = match msm
        .lookup_call_ip(ret_addr as usize, code_base)
        .or_else(|| msm.lookup_trappable_ip(ret_addr as usize, code_base))
+       .or_else(|| msm.lookup_loop_ip(ret_addr as usize, code_base))
    {
        Some(x) => x,
        _ => return ExecutionStateImage { frames: results },
@@ -1,104 +0,0 @@
-use crate::alternative_stack::begin_unsafe_unwind;
-use crate::import::{ImportObject, Namespace};
-use crate::trampoline::{CallContext, TrampolineBufferBuilder};
-use crate::vm::Ctx;
-use std::sync::atomic::{AtomicBool, Ordering};
-
-static INTERRUPTED: AtomicBool = AtomicBool::new(false);
-
-pub fn set_interrupted(x: bool) {
-    INTERRUPTED.store(x, Ordering::SeqCst);
-}
-
-pub fn get_interrupted() -> bool {
-    INTERRUPTED.load(Ordering::SeqCst)
-}
-
-pub fn get_and_reset_interrupted() -> bool {
-    INTERRUPTED.swap(false, Ordering::SeqCst)
-}
-
-pub fn patch_import_object(x: &mut ImportObject) {
-    struct Intrinsics {
-        suspend: fn(&mut Ctx),
-        check_interrupt: fn(&mut Ctx),
-    }
-
-    lazy_static! {
-        static ref INTRINSICS: Intrinsics = {
-            let mut builder = TrampolineBufferBuilder::new();
-            let idx_suspend =
-                builder.add_context_rsp_state_preserving_trampoline(suspend, ::std::ptr::null());
-            let idx_check_interrupt = builder
-                .add_context_rsp_state_preserving_trampoline(check_interrupt, ::std::ptr::null());
-            let trampolines = builder.build();
-
-            let ret = Intrinsics {
-                suspend: unsafe { ::std::mem::transmute(trampolines.get_trampoline(idx_suspend)) },
-                check_interrupt: unsafe {
-                    ::std::mem::transmute(trampolines.get_trampoline(idx_check_interrupt))
-                },
-            };
-            ::std::mem::forget(trampolines);
-            ret
-        };
-    }
-
-    let mut ns = Namespace::new();
-
-    let suspend_fn = INTRINSICS.suspend;
-    let check_interrupt_fn = INTRINSICS.check_interrupt;
-
-    ns.insert("suspend", func!(suspend_fn));
-    ns.insert("check_interrupt", func!(check_interrupt_fn));
-    x.register("wasmer_suspend", ns);
-}
-
-#[allow(clippy::cast_ptr_alignment)]
-unsafe extern "C" fn check_interrupt(ctx: &mut Ctx, _: *const CallContext, stack: *const u64) {
-    if get_and_reset_interrupted() {
-        do_suspend(ctx, stack);
-    }
-}
-
-#[allow(clippy::cast_ptr_alignment)]
-unsafe extern "C" fn suspend(ctx: &mut Ctx, _: *const CallContext, stack: *const u64) {
-    do_suspend(ctx, stack);
-}
-
-unsafe fn do_suspend(ctx: &mut Ctx, mut stack: *const u64) -> ! {
-    use crate::state::x64::{build_instance_image, read_stack, X64Register, GPR};
-
-    let image = {
-        let msm = (*ctx.module)
-            .runnable_module
-            .get_module_state_map()
-            .unwrap();
-        let code_base = (*ctx.module).runnable_module.get_code().unwrap().as_ptr() as usize;
-
-        let mut known_registers: [Option<u64>; 24] = [None; 24];
-
-        let r15 = *stack;
-        let r14 = *stack.offset(1);
-        let r13 = *stack.offset(2);
-        let r12 = *stack.offset(3);
-        let rbx = *stack.offset(4);
-        stack = stack.offset(5);
-
-        known_registers[X64Register::GPR(GPR::R15).to_index().0] = Some(r15);
-        known_registers[X64Register::GPR(GPR::R14).to_index().0] = Some(r14);
-        known_registers[X64Register::GPR(GPR::R13).to_index().0] = Some(r13);
-        known_registers[X64Register::GPR(GPR::R12).to_index().0] = Some(r12);
-        known_registers[X64Register::GPR(GPR::RBX).to_index().0] = Some(rbx);
-
-        let es_image = read_stack(&msm, code_base, stack, known_registers, None);
-
-        {
-            use colored::*;
-            eprintln!("{}", "Suspending instance.".green().bold());
-        }
-        build_instance_image(ctx, es_image)
-    };
-
-    begin_unsafe_unwind(Box::new(image));
-}
@@ -100,6 +100,8 @@ pub struct InternalCtx {
    pub memory_bound: usize,

    pub internals: *mut [u64; INTERNALS_SIZE], // TODO: Make this dynamic?
+
+   pub interrupt_signal_mem: *mut u8,
}

static INTERNAL_FIELDS: AtomicUsize = AtomicUsize::new(0);

@@ -207,6 +209,17 @@ fn get_intrinsics_for_module(m: &ModuleInfo) -> *const Intrinsics {
    }
}

+#[cfg(all(unix, target_arch = "x86_64"))]
+fn get_interrupt_signal_mem() -> *mut u8 {
+   unsafe { crate::alternative_stack::get_wasm_interrupt_signal_mem() }
+}
+
+#[cfg(not(all(unix, target_arch = "x86_64")))]
+fn get_interrupt_signal_mem() -> *mut u8 {
+   static mut REGION: u64 = 0;
+   unsafe { &mut REGION as *mut u64 as *mut u8 }
+}
+
impl Ctx {
    #[doc(hidden)]
    pub unsafe fn new(
@@ -245,6 +258,8 @@ impl Ctx {
    memory_bound: mem_bound,

    internals: &mut local_backing.internals.0,
+
+   interrupt_signal_mem: get_interrupt_signal_mem(),
    },
    local_functions: local_backing.local_functions.as_ptr(),

@@ -296,6 +311,8 @@ impl Ctx {
    memory_bound: mem_bound,

    internals: &mut local_backing.internals.0,
+
+   interrupt_signal_mem: get_interrupt_signal_mem(),
    },
    local_functions: local_backing.local_functions.as_ptr(),

@@ -419,9 +436,13 @@ impl Ctx {
        12 * (mem::size_of::<usize>() as u8)
    }

-   pub fn offset_local_functions() -> u8 {
+   pub fn offset_interrupt_signal_mem() -> u8 {
        13 * (mem::size_of::<usize>() as u8)
    }
+
+   pub fn offset_local_functions() -> u8 {
+       14 * (mem::size_of::<usize>() as u8)
+   }
}

enum InnerFunc {}
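
Note: the offset helpers are plain slot arithmetic over pointer-sized fields; interrupt_signal_mem occupies slot 13 of InternalCtx, pushing local_functions from 13 to 14:

    use std::mem;

    fn offset_interrupt_signal_mem() -> u8 {
        13 * (mem::size_of::<usize>() as u8)
    }

    fn offset_local_functions() -> u8 {
        14 * (mem::size_of::<usize>() as u8)
    }

    fn main() {
        // On x86-64 these are 104 and 112 bytes.
        println!("{} {}", offset_interrupt_signal_mem(), offset_local_functions());
    }
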
@@ -640,6 +661,11 @@ mod vm_offset_tests {
        offset_of!(InternalCtx => internals).get_byte_offset(),
    );

+   assert_eq!(
+       Ctx::offset_interrupt_signal_mem() as usize,
+       offset_of!(InternalCtx => interrupt_signal_mem).get_byte_offset(),
+   );
+
    assert_eq!(
        Ctx::offset_local_functions() as usize,
        offset_of!(Ctx => local_functions).get_byte_offset(),
@@ -24,7 +24,7 @@ use wasmer_runtime_core::{
    module::{ModuleInfo, ModuleInner},
    state::{
        x64::new_machine_state, x64::X64Register, FunctionStateMap, MachineState, MachineValue,
-       ModuleStateMap, WasmAbstractValue,
+       ModuleStateMap, OffsetInfo, WasmAbstractValue,
    },
    structures::{Map, TypedIndex},
    typed_func::Wasm,

@@ -151,7 +151,12 @@ pub struct X64FunctionCode {

    assembler: Option<Assembler>,
    function_labels: Option<HashMap<usize, (DynamicLabel, Option<AssemblyOffset>)>>,
-   breakpoints: Option<HashMap<AssemblyOffset, Box<Fn(BkptInfo) + Send + Sync + 'static>>>,
+   breakpoints: Option<
+       HashMap<
+           AssemblyOffset,
+           Box<Fn(BkptInfo) -> Result<(), Box<dyn Any>> + Send + Sync + 'static>,
+       >,
+   >,
    returns: SmallVec<[WpType; 1]>,
    locals: Vec<Location>,
    num_params: usize,

@@ -179,7 +184,7 @@ pub struct X64ExecutionContext {
    function_pointers: Vec<FuncPtr>,
    function_offsets: Vec<AssemblyOffset>,
    signatures: Arc<Map<SigIndex, FuncSig>>,
-   breakpoints: Arc<HashMap<usize, Box<Fn(BkptInfo) + Send + Sync + 'static>>>,
+   breakpoints: BkptMap,
    func_import_count: usize,
    msm: ModuleStateMap,
}
@@ -217,6 +222,10 @@ impl RunnableModule for X64ExecutionContext {
        Some(self.msm.clone())
    }

+   fn get_breakpoints(&self) -> Option<BkptMap> {
+       Some(self.breakpoints.clone())
+   }
+
    fn get_trampoline(&self, _: &ModuleInfo, sig_index: SigIndex) -> Option<Wasm> {
        use std::ffi::c_void;
        use wasmer_runtime_core::typed_func::WasmTrapInfo;
@@ -245,16 +254,17 @@ impl RunnableModule for X64ExecutionContext {
        num_params_plus_one.unwrap().as_ptr() as usize - 1,
    );
    let args_reverse: SmallVec<[u64; 8]> = args.iter().cloned().rev().collect();
-   protect_unix::BKPT_MAP
-       .with(|x| x.borrow_mut().push(execution_context.breakpoints.clone()));
-   let ret = match protect_unix::call_protected(|| {
-       CONSTRUCT_STACK_AND_CALL_WASM(
-           args_reverse.as_ptr(),
-           args_reverse.as_ptr().offset(args_reverse.len() as isize),
-           ctx,
-           func.as_ptr(),
-       )
-   }) {
+   let ret = match protect_unix::call_protected(
+       || {
+           CONSTRUCT_STACK_AND_CALL_WASM(
+               args_reverse.as_ptr(),
+               args_reverse.as_ptr().offset(args_reverse.len() as isize),
+               ctx,
+               func.as_ptr(),
+           )
+       },
+       Some(execution_context.breakpoints.clone()),
+   ) {
        Ok(x) => {
            if !rets.is_null() {
                *rets = x;

@@ -269,7 +279,6 @@ impl RunnableModule for X64ExecutionContext {
            false
        }
    };
-   protect_unix::BKPT_MAP.with(|x| x.borrow_mut().pop().unwrap());
    ret
}
@@ -548,7 +557,13 @@ impl X64FunctionCode {
) {
    let state_diff_id = Self::get_state_diff(m, fsm, control_stack);
    let offset = a.get_offset().0;
-   fsm.trappable_offsets.insert(offset, state_diff_id);
+   fsm.trappable_offsets.insert(
+       offset,
+       OffsetInfo {
+           activate_offset: offset,
+           diff_id: state_diff_id,
+       },
+   );
}

/// Moves `loc` to a valid location for `div`/`idiv`.
@@ -1204,7 +1219,9 @@ impl X64FunctionCode {
    }

    // Align stack to 16 bytes.
-   if (m.get_stack_offset() + used_gprs.len() * 8 + stack_offset) % 16 != 0 {
+   if (m.get_stack_offset() + used_gprs.len() * 8 + used_xmms.len() * 8 + stack_offset) % 16
+       != 0
+   {
        a.emit_sub(Size::S64, Location::Imm32(8), Location::GPR(GPR::RSP));
        stack_offset += 8;
        m.state.stack_values.push(MachineValue::Undefined);
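
Note: the fix counts spilled XMM registers toward call-site alignment; without it, an odd number of XMM saves leaves %rsp misaligned at the call. The arithmetic in isolation:

    fn needs_pad(base: usize, used_gprs: usize, used_xmms: usize, stack_offset: usize) -> bool {
        (base + used_gprs * 8 + used_xmms * 8 + stack_offset) % 16 != 0
    }

    fn main() {
        // One spilled XMM and nothing else: the old check (ignoring XMMs)
        // reported "aligned"; the corrected one asks for an 8-byte pad.
        assert!(needs_pad(0, 0, 1, 0));
    }
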
@@ -1299,13 +1316,21 @@ impl X64FunctionCode {
        Machine::get_param_location(0),
    ); // vmctx

+   assert!(m.state.stack_values.len() % 2 == 1); // explicit shadow takes one slot
+
    cb(a);

    // Offset needs to be after the 'call' instruction.
    if let Some((fsm, control_stack)) = state_context {
        let state_diff_id = Self::get_state_diff(m, fsm, control_stack);
        let offset = a.get_offset().0;
-       fsm.call_offsets.insert(offset, state_diff_id);
+       fsm.call_offsets.insert(
+           offset,
+           OffsetInfo {
+               activate_offset: offset,
+               diff_id: state_diff_id,
+           },
+       );
    }

    // Restore stack.
@@ -1642,6 +1667,31 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
        state_diff_id,
    });

+   // Check interrupt signal without branching
+   let activate_offset = a.get_offset().0;
+
+   a.emit_mov(
+       Size::S64,
+       Location::Memory(
+           Machine::get_vmctx_reg(),
+           vm::Ctx::offset_interrupt_signal_mem() as i32,
+       ),
+       Location::GPR(GPR::RAX),
+   );
+   self.fsm.loop_offsets.insert(
+       a.get_offset().0,
+       OffsetInfo {
+           activate_offset,
+           diff_id: state_diff_id,
+       },
+   );
+   self.fsm.wasm_function_header_target_offset = Some(a.get_offset().0);
+   a.emit_mov(
+       Size::S64,
+       Location::Memory(GPR::RAX, 0),
+       Location::GPR(GPR::RAX),
+   );
+
    assert_eq!(self.machine.state.wasm_inst_offset, ::std::usize::MAX);

    Ok(())
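
Note: the emitted check is two loads and no branch: fetch the interrupt page's address out of vmctx, then touch the page. While the page is readable the cost is negligible; once set_wasm_interrupt() makes it PROT_NONE, the second load raises SIGSEGV at an offset that was registered in loop_offsets, so the handler can snapshot exactly there. An equivalent sketch in unsafe Rust:

    // interrupt_signal_mem plays the role of the vmctx field; in generated
    // code both loads target %rax.
    unsafe fn check_interrupt(interrupt_signal_mem: *const *const u64) {
        let page = std::ptr::read_volatile(interrupt_signal_mem); // mov rax, [vmctx + off]
        let _ = std::ptr::read_volatile(page); // mov rax, [rax] -- faults when armed
    }

    fn main() {
        let cell: u64 = 0;
        let page: *const u64 = &cell;
        unsafe { check_interrupt(&page) }; // page readable: falls straight through
    }
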
@@ -3863,6 +3913,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
    let label = a.get_label();
    let state_diff_id =
        Self::get_state_diff(&self.machine, &mut self.fsm, &mut self.control_stack);
+   let activate_offset = a.get_offset().0;
+
    self.control_stack.push(ControlFrame {
        label: label,
        loop_like: true,

@@ -3875,10 +3927,29 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
        state: self.machine.state.clone(),
        state_diff_id,
    });
-   self.fsm
-       .loop_offsets
-       .insert(a.get_offset().0, state_diff_id);
    a.emit_label(label);
+
+   // Check interrupt signal without branching
+   a.emit_mov(
+       Size::S64,
+       Location::Memory(
+           Machine::get_vmctx_reg(),
+           vm::Ctx::offset_interrupt_signal_mem() as i32,
+       ),
+       Location::GPR(GPR::RAX),
+   );
+   self.fsm.loop_offsets.insert(
+       a.get_offset().0,
+       OffsetInfo {
+           activate_offset,
+           diff_id: state_diff_id,
+       },
+   );
+   a.emit_mov(
+       Size::S64,
+       Location::Memory(GPR::RAX, 0),
+       Location::GPR(GPR::RAX),
+   );
    }
    Operator::Nop => {}
    Operator::MemorySize { reserved } => {
@@ -10,18 +10,15 @@
//! unless you have memory unsafety elsewhere in your code.
//!
use std::any::Any;
-use std::cell::{Cell, RefCell};
-use std::collections::HashMap;
-use std::sync::Arc;
+use std::cell::Cell;
use wasmer_runtime_core::alternative_stack::{
    begin_unsafe_unwind, catch_unsafe_unwind, ensure_sighandler,
};
-use wasmer_runtime_core::codegen::BkptInfo;
+use wasmer_runtime_core::codegen::BkptMap;
use wasmer_runtime_core::typed_func::WasmTrapInfo;

thread_local! {
    pub static TRAP_EARLY_DATA: Cell<Option<Box<dyn Any>>> = Cell::new(None);
-   pub static BKPT_MAP: RefCell<Vec<Arc<HashMap<usize, Box<Fn(BkptInfo) + Send + Sync + 'static>>>>> = RefCell::new(Vec::new());
}

pub unsafe fn trigger_trap() -> ! {

@@ -33,17 +30,20 @@ pub enum CallProtError {
    Error(Box<dyn Any>),
}

-pub fn call_protected<T>(f: impl FnOnce() -> T) -> Result<T, CallProtError> {
+pub fn call_protected<T>(
+   f: impl FnOnce() -> T,
+   breakpoints: Option<BkptMap>,
+) -> Result<T, CallProtError> {
    ensure_sighandler();
    unsafe {
-       let ret = catch_unsafe_unwind(|| f());
+       let ret = catch_unsafe_unwind(|| f(), breakpoints);
        match ret {
            Ok(x) => Ok(x),
-           Err(_) => {
+           Err(e) => {
                if let Some(data) = TRAP_EARLY_DATA.with(|cell| cell.replace(None)) {
                    Err(CallProtError::Error(data))
                } else {
-                   Err(CallProtError::Trap(WasmTrapInfo::Unknown))
+                   Err(CallProtError::Error(e))
                }
            }
        }
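
Note: shape of the updated error path, with stand-in types: the unwind payload now survives to the caller (where the CLI downcasts it to an InstanceImage) instead of collapsing into WasmTrapInfo::Unknown:

    use std::any::Any;

    struct CallProtError(Box<dyn Any>);

    fn call_protected<T>(
        f: impl FnOnce() -> Result<T, Box<dyn Any>>, // stands in for catch_unsafe_unwind(f, breakpoints)
    ) -> Result<T, CallProtError> {
        f().map_err(CallProtError)
    }

    fn main() {
        let r = call_protected(|| Err::<(), _>(Box::new("suspended") as Box<dyn Any>));
        if let Err(CallProtError(payload)) = r {
            assert_eq!(payload.downcast_ref::<&str>(), Some(&"suspended"));
        }
    }
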
@@ -505,14 +505,6 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
    mapped_dirs,
);

-#[cfg(feature = "backend:singlepass")]
-{
-   if options.backend == Backend::Singlepass {
-       use wasmer_runtime_core::suspend::patch_import_object;
-       patch_import_object(&mut import_object);
-   }
-}
-
let mut instance = module
    .instantiate(&import_object)
    .map_err(|e| format!("Can't instantiate module: {:?}", e))?;

@@ -543,6 +535,8 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
} else {
    None
};
+let breakpoints = instance.module.runnable_module.get_breakpoints();

loop {
    let ret = if let Some(image) = image.take() {
        let msm = instance

@@ -558,10 +552,14 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
            code_base,
            image,
            instance.context_mut(),
+           breakpoints.clone(),
        )
        .map(|_| ())
    } else {
-       catch_unsafe_unwind(|| start_raw(instance.context_mut()))
+       catch_unsafe_unwind(
+           || start_raw(instance.context_mut()),
+           breakpoints.clone(),
+       )
    };
    if let Err(e) = ret {
        if let Some(new_image) = e.downcast_ref::<InstanceImage>() {

@@ -709,7 +707,7 @@ fn run(options: Run) {
    match execute_wasm(&options) {
        Ok(()) => {}
        Err(message) => {
-           eprintln!("{:?}", message);
+           eprintln!("execute_wasm: {:?}", message);
            exit(1);
        }
    }
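
Note: the resume loop in execute_wasm, reduced to its shape (InstanceImage and start are stand-ins for the wasmer APIs used above): run, and whenever the unwind payload downcasts to an InstanceImage, re-enter with that snapshot.

    use std::any::Any;

    struct InstanceImage; // stand-in for wasmer_runtime_core::state::InstanceImage

    fn start(_resume_from: Option<InstanceImage>) -> Result<(), Box<dyn Any>> {
        Ok(()) // stands in for invoke_call_return_on_stack / start_raw
    }

    fn main() {
        let mut image: Option<InstanceImage> = None;
        loop {
            match start(image.take()) {
                Ok(()) => break, // ran to completion
                Err(e) => match e.downcast::<InstanceImage>() {
                    Ok(img) => image = Some(*img), // suspended: re-enter with the snapshot
                    Err(_) => {
                        eprintln!("trap: not a snapshot, giving up");
                        break;
                    }
                },
            }
        }
    }
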