use crate::{cache::BackendCache, trampoline::Trampolines};
use crate::{
    libcalls,
    relocation::{
        ExternalRelocation, LibCall, LocalRelocation, LocalTrapSink, Reloc, RelocSink,
        RelocationType, TrapSink, VmCall, VmCallKind,
    },
    signal::HandlerData,
};

use rayon::prelude::*;

use byteorder::{ByteOrder, LittleEndian};
use cranelift_codegen::{ir, isa, Context};
use std::{
    mem,
    ptr::{write_unaligned, NonNull},
    sync::Arc,
};

use wasmer_runtime_core::cache::Error as CacheError;
use wasmer_runtime_core::{
    self,
    backend::{
        sys::{Memory, Protect},
        SigRegistry,
    },
    error::{CompileError, CompileResult},
    module::ModuleInfo,
    structures::{Map, SliceMap, TypedIndex},
    types::{FuncSig, LocalFuncIndex, SigIndex},
    vm, vmcalls,
};
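
// Stack-probe symbols: `LibCall::Probestack` relocations in `finalize` below are
// resolved to `__rust_probestack` on non-Windows targets and to `__chkstk` on
// 64-bit Windows.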
extern "C" {
    #[cfg(not(target_os = "windows"))]
    pub fn __rust_probestack();
    #[cfg(all(target_os = "windows", target_pointer_width = "64"))]
    pub fn __chkstk();
}
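
/// Translates a local function index into a pointer to that function's code
/// inside the backing `Memory`, using the per-function offsets stored in `map`.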
fn lookup_func(
    map: &SliceMap<LocalFuncIndex, usize>,
    memory: &Memory,
    local_func_index: LocalFuncIndex,
) -> Option<NonNull<vm::Func>> {
    let offset = *map.get(local_func_index)?;
    let ptr = unsafe { memory.as_ptr().add(offset) };

    NonNull::new(ptr).map(|nonnull| nonnull.cast())
}
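
/// Intermediate state produced while compiling a module: the emitted machine
/// code, the offset of each local function within that code, and the
/// relocations that still have to be applied before the code can be executed.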
#[allow(dead_code)]
pub struct FuncResolverBuilder {
    map: Map<LocalFuncIndex, usize>,
    memory: Memory,
    local_relocs: Map<LocalFuncIndex, Box<[LocalRelocation]>>,
    external_relocs: Map<LocalFuncIndex, Box<[ExternalRelocation]>>,
    import_len: usize,
}

impl FuncResolverBuilder {
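    /// Rebuilds a resolver from an already-compiled cache entry: the code is
    /// taken as-is from `backend_cache`, so only the cached offsets, external
    /// relocations, and trampolines need to be restored; no recompilation
    /// happens here.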
    pub fn new_from_backend_cache(
        backend_cache: BackendCache,
        mut code: Memory,
        info: &ModuleInfo,
    ) -> Result<(Self, Arc<Trampolines>, HandlerData), CacheError> {
        unsafe {
            code.protect(.., Protect::ReadWrite)
                .map_err(|e| CacheError::Unknown(e.to_string()))?;
        }

        let handler_data =
            HandlerData::new(backend_cache.trap_sink, code.as_ptr() as _, code.size());

        Ok((
            Self {
                map: backend_cache.offsets,
                memory: code,
                local_relocs: Map::new(),
                external_relocs: backend_cache.external_relocs,
                import_len: info.imported_functions.len(),
            },
            Arc::new(Trampolines::from_trampoline_cache(
                backend_cache.trampolines,
            )),
            handler_data,
        ))
    }
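
    /// Compiles every local function in parallel with rayon, lays the emitted
    /// code out in a single executable `Memory` region (padding between
    /// functions is filled with `0xCC`/`int3`), records each function's
    /// offset, and applies the call relocations between local functions.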
    pub fn new(
        isa: &dyn isa::TargetIsa,
        function_bodies: Map<LocalFuncIndex, ir::Function>,
        info: &ModuleInfo,
    ) -> CompileResult<(Self, HandlerData)> {
        let num_func_bodies = function_bodies.len();
        let mut local_relocs = Map::with_capacity(num_func_bodies);
        let mut external_relocs = Map::with_capacity(num_func_bodies);

        let mut trap_sink = TrapSink::new();

        let compiled_functions: Result<Vec<(Vec<u8>, (RelocSink, LocalTrapSink))>, CompileError> =
            function_bodies
                .into_vec()
                .par_iter()
                .map_init(
                    || Context::new(),
                    |ctx, func| {
                        let mut code_buf = Vec::new();
                        ctx.func = func.to_owned();
                        let mut reloc_sink = RelocSink::new();
                        let mut local_trap_sink = LocalTrapSink::new();

                        ctx.compile_and_emit(
                            isa,
                            &mut code_buf,
                            &mut reloc_sink,
                            &mut local_trap_sink,
                        )
                        .map_err(|e| CompileError::InternalError { msg: e.to_string() })?;
                        ctx.clear();
                        Ok((code_buf, (reloc_sink, local_trap_sink)))
                    },
                )
                .collect();

        let compiled_functions = compiled_functions?;
        let mut total_size = 0;
        // Split the compiled results into the code buffers and their
        // accompanying relocation/trap sinks.
        let (code_bufs, sinks): (Vec<Vec<u8>>, Vec<(RelocSink, LocalTrapSink)>) =
            compiled_functions.into_iter().unzip();
        for (code_buf, (reloc_sink, mut local_trap_sink)) in code_bufs.iter().zip(sinks.into_iter())
        {
            // Clear the local trap sink and consolidate all trap info
            // into a single location.
            trap_sink.drain_local(total_size, &mut local_trap_sink);

            // Round up each function's size to pointer alignment.
            total_size += round_up(code_buf.len(), mem::size_of::<usize>());

            local_relocs.push(reloc_sink.local_relocs.into_boxed_slice());
            external_relocs.push(reloc_sink.external_relocs.into_boxed_slice());
        }

        let mut memory = Memory::with_size(total_size)
            .map_err(|e| CompileError::InternalError { msg: e.to_string() })?;
        unsafe {
            memory
                .protect(.., Protect::ReadWrite)
                .map_err(|e| CompileError::InternalError { msg: e.to_string() })?;
        }

        // Normally, excess memory due to alignment and page-rounding would
        // be filled with null-bytes. On x86 (and x86_64),
        // "\x00\x00" disassembles to "add byte ptr [eax],al".
        //
        // If the instruction pointer falls out of its designated area,
        // it would be better if it would immediately crash instead of
        // continuing on and causing non-local issues.
        //
        // "\xCC" disassembles to "int3", which will immediately cause
        // an interrupt that we can catch if we want.
        for i in unsafe { memory.as_slice_mut() } {
            *i = 0xCC;
        }

        let mut map = Map::with_capacity(num_func_bodies);

        let mut previous_end = 0;
        for compiled in code_bufs.iter() {
            let new_end = previous_end + round_up(compiled.len(), mem::size_of::<usize>());
            unsafe {
                memory.as_slice_mut()[previous_end..previous_end + compiled.len()]
                    .copy_from_slice(&compiled[..]);
            }
            map.push(previous_end);
            previous_end = new_end;
        }

        let handler_data =
            HandlerData::new(Arc::new(trap_sink), memory.as_ptr() as _, memory.size());

        let mut func_resolver_builder = Self {
            map,
            memory,
            local_relocs,
            external_relocs,
            import_len: info.imported_functions.len(),
        };

        func_resolver_builder.relocate_locals();

        Ok((func_resolver_builder, handler_data))
    }
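
    /// Patches direct calls between local functions. For each recorded local
    /// relocation, the 32-bit value at `func_addr + offset` is overwritten with
    /// the PC-relative delta `target - reloc_address + addend`.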
    fn relocate_locals(&mut self) {
        for (index, relocs) in self.local_relocs.iter() {
            for ref reloc in relocs.iter() {
                let local_func_index = LocalFuncIndex::new(reloc.target.index() - self.import_len);
                let target_func_address = lookup_func(&self.map, &self.memory, local_func_index)
                    .unwrap()
                    .as_ptr() as usize;

                // We need the address of the current function
                // because these calls are relative.
                let func_addr = lookup_func(&self.map, &self.memory, index)
                    .unwrap()
                    .as_ptr() as usize;

                unsafe {
                    let reloc_address = func_addr + reloc.offset as usize;
                    let reloc_delta = target_func_address
                        .wrapping_sub(reloc_address)
                        .wrapping_add(reloc.addend as usize);

                    write_unaligned(reloc_address as *mut u32, reloc_delta as u32);
                }
            }
        }
    }
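
    /// Resolves all remaining external relocations (libcalls, debug
    /// intrinsics, vmcalls, and signature indices), then marks the code memory
    /// read+execute and packages everything into a `FuncResolver` plus the
    /// `BackendCache` used for serialization.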
    pub fn finalize(
        mut self,
        signatures: &SliceMap<SigIndex, FuncSig>,
        trampolines: Arc<Trampolines>,
        handler_data: HandlerData,
    ) -> CompileResult<(FuncResolver, BackendCache)> {
        for (index, relocs) in self.external_relocs.iter() {
            for ref reloc in relocs.iter() {
                let target_func_address: isize = match reloc.target {
                    RelocationType::LibCall(libcall) => match libcall {
                        LibCall::CeilF32 => libcalls::ceilf32 as isize,
                        LibCall::FloorF32 => libcalls::floorf32 as isize,
                        LibCall::TruncF32 => libcalls::truncf32 as isize,
                        LibCall::NearestF32 => libcalls::nearbyintf32 as isize,
                        LibCall::CeilF64 => libcalls::ceilf64 as isize,
                        LibCall::FloorF64 => libcalls::floorf64 as isize,
                        LibCall::TruncF64 => libcalls::truncf64 as isize,
                        LibCall::NearestF64 => libcalls::nearbyintf64 as isize,
                        #[cfg(all(target_pointer_width = "64", target_os = "windows"))]
                        LibCall::Probestack => __chkstk as isize,
                        #[cfg(not(target_os = "windows"))]
                        LibCall::Probestack => __rust_probestack as isize,
                    },
                    RelocationType::Intrinsic(ref name) => match name.as_str() {
                        "i32print" => i32_print as isize,
                        "i64print" => i64_print as isize,
                        "f32print" => f32_print as isize,
                        "f64print" => f64_print as isize,
                        "strtdbug" => start_debug as isize,
                        "enddbug" => end_debug as isize,
                        _ => Err(CompileError::InternalError {
                            msg: format!("unexpected intrinsic: {}", name),
                        })?,
                    },
                    RelocationType::VmCall(vmcall) => match vmcall {
                        VmCall::Local(kind) => match kind {
                            VmCallKind::StaticMemoryGrow => vmcalls::local_static_memory_grow as _,
                            VmCallKind::StaticMemorySize => vmcalls::local_static_memory_size as _,

                            VmCallKind::SharedStaticMemoryGrow => unimplemented!(),
                            VmCallKind::SharedStaticMemorySize => unimplemented!(),

                            VmCallKind::DynamicMemoryGrow => {
                                vmcalls::local_dynamic_memory_grow as _
                            }
                            VmCallKind::DynamicMemorySize => {
                                vmcalls::local_dynamic_memory_size as _
                            }
                        },
                        VmCall::Import(kind) => match kind {
                            VmCallKind::StaticMemoryGrow => {
                                vmcalls::imported_static_memory_grow as _
                            }
                            VmCallKind::StaticMemorySize => {
                                vmcalls::imported_static_memory_size as _
                            }

                            VmCallKind::SharedStaticMemoryGrow => unimplemented!(),
                            VmCallKind::SharedStaticMemorySize => unimplemented!(),

                            VmCallKind::DynamicMemoryGrow => {
                                vmcalls::imported_dynamic_memory_grow as _
                            }
                            VmCallKind::DynamicMemorySize => {
                                vmcalls::imported_dynamic_memory_size as _
                            }
                        },
                    },
                    RelocationType::Signature(sig_index) => {
                        let signature = SigRegistry.lookup_signature_ref(&signatures[sig_index]);
                        let sig_index = SigRegistry.lookup_sig_index(signature);
                        sig_index.index() as _
                    }
                };

                // We need the address of the current function
                // because some of these calls are relative.
                let func_addr = lookup_func(&self.map, &self.memory, index)
                    .unwrap()
                    .as_ptr() as usize;

                // Determine relocation type and apply relocation.
                match reloc.reloc {
                    Reloc::Abs8 => {
                        let ptr_to_write = (target_func_address as u64)
                            .checked_add(reloc.addend as u64)
                            .unwrap();
                        let empty_space_offset = self.map[index] + reloc.offset as usize;
                        let ptr_slice = unsafe {
                            &mut self.memory.as_slice_mut()
                                [empty_space_offset..empty_space_offset + 8]
                        };
                        LittleEndian::write_u64(ptr_slice, ptr_to_write);
                    }
                    Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => unsafe {
                        let reloc_address = (func_addr as usize) + reloc.offset as usize;
                        let reloc_delta = target_func_address
                            .wrapping_sub(reloc_address as isize)
                            .wrapping_add(reloc.addend as isize);

                        write_unaligned(reloc_address as *mut u32, reloc_delta as u32);
                    },
                }
            }
        }

        unsafe {
            self.memory
                .protect(.., Protect::ReadExec)
                .map_err(|e| CompileError::InternalError { msg: e.to_string() })?;
        }

        let backend_cache = BackendCache {
            external_relocs: self.external_relocs.clone(),
            offsets: self.map.clone(),
            trap_sink: handler_data.trap_data,
            trampolines: trampolines.to_trampoline_cache(),
        };

        Ok((
            FuncResolver {
                map: self.map,
                memory: Arc::new(self.memory),
            },
            backend_cache,
        ))
    }
}

unsafe impl Sync for FuncResolver {}
unsafe impl Send for FuncResolver {}

/// Resolves a function index to a function address.
pub struct FuncResolver {
    map: Map<LocalFuncIndex, usize>,
    pub(crate) memory: Arc<Memory>,
}

impl FuncResolver {
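    /// Returns a pointer to the compiled code for the given local function,
    /// or `None` if the index is out of range.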
    pub fn lookup(&self, index: LocalFuncIndex) -> Option<NonNull<vm::Func>> {
        lookup_func(&self.map, &self.memory, index)
    }
}
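
/// Rounds `n` up to the next multiple of `multiple`, which is assumed to be a
/// power of two (callers pass `mem::size_of::<usize>()`).
/// For example, `round_up(10, 8)` is `(10 + 7) & !7 == 16`.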
#[inline]
fn round_up(n: usize, multiple: usize) -> usize {
    (n + multiple - 1) & !(multiple - 1)
}
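
// Debug-printing helpers: the host functions that the "i32print", "i64print",
// "f32print", "f64print", "strtdbug", and "enddbug" intrinsic relocations in
// `finalize` are resolved to.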
extern "C" fn i32_print(_ctx: &mut vm::Ctx, n: i32) {
    eprint!(" i32: {},", n);
}
extern "C" fn i64_print(_ctx: &mut vm::Ctx, n: i64) {
    eprint!(" i64: {},", n);
}
extern "C" fn f32_print(_ctx: &mut vm::Ctx, n: f32) {
    eprint!(" f32: {},", n);
}
extern "C" fn f64_print(_ctx: &mut vm::Ctx, n: f64) {
    eprint!(" f64: {},", n);
}
extern "C" fn start_debug(ctx: &mut vm::Ctx, func_index: u32) {
    if let Some(symbol_map) = unsafe { ctx.borrow_symbol_map() } {
        if let Some(fn_name) = symbol_map.get(&func_index) {
            eprint!("func ({} ({})), args: [", fn_name, func_index);
            return;
        }
    }
    eprint!("func ({}), args: [", func_index);
}
extern "C" fn end_debug(_ctx: &mut vm::Ctx) {
    eprintln!(" ]");
}