Improve performance of memory access.

Heyang Zhou 2019-05-13 05:11:08 -07:00
parent 620a6ddd85
commit af1ac9af96
3 changed files with 100 additions and 44 deletions


@@ -67,6 +67,9 @@ pub struct InternalCtx {
pub intrinsics: *const Intrinsics,
pub stack_lower_bound: *mut u8,
pub memory_base: *mut u8,
pub memory_bound: usize,
}
#[repr(C)]
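The two new fields cache the first linear memory's base pointer and byte length directly in the context, so generated code can reach them with one load from `vmctx` instead of chasing `memories` to a `LocalMemory` and then to its `base`. A minimal sketch of the access pattern this enables; `MemView`, `checked_read`, `addr`, and `len` are illustrative, not part of the commit:

```rust
// Illustrative stand-in for the two fields just added to `InternalCtx`.
struct MemView {
    memory_base: *mut u8,
    memory_bound: usize, // size of linear memory, in bytes
}

/// One compare against the cached bound replaces two dependent pointer loads.
unsafe fn checked_read(view: &MemView, addr: usize, len: usize) -> Option<*const u8> {
    if addr.checked_add(len)? <= view.memory_bound {
        // Safe to read `len` bytes starting at `memory_base + addr`.
        Some(view.memory_base.add(addr) as *const u8)
    } else {
        None // generated code would trap here instead
    }
}
```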
@@ -145,6 +148,16 @@ impl Ctx {
import_backing: &mut ImportBacking,
module: &ModuleInner,
) -> Self {
let (mem_base, mem_bound): (*mut u8, usize) =
if module.info.memories.len() == 0 && module.info.imported_memories.len() == 0 {
(::std::ptr::null_mut(), 0)
} else {
let mem = match MemoryIndex::new(0).local_or_import(&module.info) {
LocalOrImport::Local(index) => local_backing.vm_memories[index],
LocalOrImport::Import(index) => import_backing.vm_memories[index],
};
((*mem).base, (*mem).bound)
};
Self {
internal: InternalCtx {
memories: local_backing.vm_memories.as_mut_ptr(),
@@ -161,6 +174,9 @@ impl Ctx {
intrinsics: get_intrinsics_for_module(&module.info),
stack_lower_bound: ::std::ptr::null_mut(),
memory_base: mem_base,
memory_bound: mem_bound,
},
local_functions: local_backing.local_functions.as_ptr(),
@@ -181,6 +197,16 @@ impl Ctx {
data: *mut c_void,
data_finalizer: fn(*mut c_void),
) -> Self {
let (mem_base, mem_bound): (*mut u8, usize) =
if module.info.memories.len() == 0 && module.info.imported_memories.len() == 0 {
(::std::ptr::null_mut(), 0)
} else {
let mem = match MemoryIndex::new(0).local_or_import(&module.info) {
LocalOrImport::Local(index) => local_backing.vm_memories[index],
LocalOrImport::Import(index) => import_backing.vm_memories[index],
};
((*mem).base, (*mem).bound)
};
Self {
internal: InternalCtx {
memories: local_backing.vm_memories.as_mut_ptr(),
@@ -197,6 +223,9 @@ impl Ctx {
intrinsics: get_intrinsics_for_module(&module.info),
stack_lower_bound: ::std::ptr::null_mut(),
memory_base: mem_base,
memory_bound: mem_bound,
},
local_functions: local_backing.local_functions.as_ptr(),
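The same `(mem_base, mem_bound)` computation now appears in both constructors. A hypothetical extraction with simplified types, just to show the logic in isolation: wasm MVP modules have at most one linear memory, so index 0 covers every access, and a module with no memory gets a null base and a zero bound, which fails every bounds check:

```rust
use std::ptr;

/// Simplified stand-in: each entry is one memory's (base, bound) pair.
fn first_memory_view(
    local_mems: &[(*mut u8, usize)],
    imported_mems: &[(*mut u8, usize)],
) -> (*mut u8, usize) {
    local_mems
        .first()
        .or_else(|| imported_mems.first())
        .copied()
        .unwrap_or((ptr::null_mut(), 0))
}
```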
@@ -298,9 +327,17 @@ impl Ctx {
9 * (mem::size_of::<usize>() as u8)
}
pub fn offset_local_functions() -> u8 {
pub fn offset_memory_base() -> u8 {
10 * (mem::size_of::<usize>() as u8)
}
pub fn offset_memory_bound() -> u8 {
11 * (mem::size_of::<usize>() as u8)
}
pub fn offset_local_functions() -> u8 {
12 * (mem::size_of::<usize>() as u8)
}
}
enum InnerFunc {}
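The offset scheme works because `InternalCtx` is `#[repr(C)]` and every field counted here is pointer-sized, so the nth field sits n words from the start. A quick check of the arithmetic, assuming a 64-bit target:

```rust
use std::mem;

/// Offset of the nth field when every field is pointer-sized.
fn offset_of_field(n: u8) -> u8 {
    n * (mem::size_of::<usize>() as u8)
}

fn main() {
    assert_eq!(offset_of_field(10), 80); // memory_base
    assert_eq!(offset_of_field(11), 88); // memory_bound
    assert_eq!(offset_of_field(12), 96); // local_functions
}
```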
@@ -504,6 +541,16 @@ mod vm_offset_tests {
offset_of!(InternalCtx => stack_lower_bound).get_byte_offset(),
);
assert_eq!(
Ctx::offset_memory_base() as usize,
offset_of!(InternalCtx => memory_base).get_byte_offset(),
);
assert_eq!(
Ctx::offset_memory_bound() as usize,
offset_of!(InternalCtx => memory_bound).get_byte_offset(),
);
assert_eq!(
Ctx::offset_local_functions() as usize,
offset_of!(Ctx => local_functions).get_byte_offset(),
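These assertions pin the hand-computed offsets to the real struct layout, so a reordered field fails the test suite instead of silently corrupting JIT-generated accesses. The same invariant in a self-contained form, using std's `offset_of!` (stable since Rust 1.77) rather than the `offset_of!` helper crate the test above uses; `Demo` is illustrative:

```rust
use std::mem::{offset_of, size_of};

#[repr(C)]
struct Demo {
    memory_base: *mut u8, // word 0
    memory_bound: usize,  // word 1
}

fn main() {
    // A reordered or resized field makes this fail at test time,
    // before the JIT ever reads a wrong offset.
    assert_eq!(offset_of!(Demo, memory_bound), size_of::<usize>());
}
```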


@@ -20,10 +20,15 @@ pub unsafe extern "C" fn local_static_memory_grow(
let local_memory = *ctx.internal.memories.add(memory_index.index());
let memory = (*local_memory).memory as *mut StaticMemory;
match (*memory).grow(delta, &mut *local_memory) {
let ret = match (*memory).grow(delta, &mut *local_memory) {
Ok(old) => old.0 as i32,
Err(_) => -1,
}
};
ctx.internal.memory_base = (*local_memory).base;
ctx.internal.memory_bound = (*local_memory).bound;
ret
}
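Refreshing `memory_base` and `memory_bound` after the grow is what keeps the cache sound: growing may move or remap the backing allocation, and every later load or store would go through the stale pointer. The same hazard in miniature, with a `Vec` standing in for the memory's backing storage; the names are illustrative:

```rust
struct Cache {
    base: *mut u8,
    bound: usize,
}

fn grow(buf: &mut Vec<u8>, delta: usize, cache: &mut Cache) {
    buf.resize(buf.len() + delta, 0); // may reallocate and move the bytes
    // Refresh the cached view; the old `cache.base` may now dangle.
    cache.base = buf.as_mut_ptr();
    cache.bound = buf.len();
}
```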
pub unsafe extern "C" fn local_static_memory_size(
@@ -44,10 +49,15 @@ pub unsafe extern "C" fn local_dynamic_memory_grow(
let local_memory = *ctx.internal.memories.add(memory_index.index());
let memory = (*local_memory).memory as *mut DynamicMemory;
match (*memory).grow(delta, &mut *local_memory) {
let ret = match (*memory).grow(delta, &mut *local_memory) {
Ok(old) => old.0 as i32,
Err(_) => -1,
}
};
ctx.internal.memory_base = (*local_memory).base;
ctx.internal.memory_bound = (*local_memory).bound;
ret
}
pub unsafe extern "C" fn local_dynamic_memory_size(
@@ -75,10 +85,15 @@ pub unsafe extern "C" fn imported_static_memory_grow(
.add(import_memory_index.index());
let memory = (*local_memory).memory as *mut StaticMemory;
match (*memory).grow(delta, &mut *local_memory) {
let ret = match (*memory).grow(delta, &mut *local_memory) {
Ok(old) => old.0 as i32,
Err(_) => -1,
}
};
ctx.internal.memory_base = (*local_memory).base;
ctx.internal.memory_bound = (*local_memory).bound;
ret
}
pub unsafe extern "C" fn imported_static_memory_size(
@@ -102,10 +117,15 @@ pub unsafe extern "C" fn imported_dynamic_memory_grow(
let local_memory = *ctx.internal.imported_memories.add(memory_index.index());
let memory = (*local_memory).memory as *mut DynamicMemory;
match (*memory).grow(delta, &mut *local_memory) {
let ret = match (*memory).grow(delta, &mut *local_memory) {
Ok(old) => old.0 as i32,
Err(_) => -1,
}
};
ctx.internal.memory_base = (*local_memory).base;
ctx.internal.memory_bound = (*local_memory).bound;
ret
}
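All four grow hooks follow one shape: grow, record the old size in pages (or -1 on failure), then refresh the cached view unconditionally. A hypothetical consolidation to make the shared pattern explicit; the commit itself keeps the four hand-monomorphized versions, and `Growable` is an assumption, not a wasmer trait:

```rust
/// Assumed interface; not a wasmer trait.
trait Growable {
    /// Returns the old size in pages, or None if the grow failed.
    fn grow(&mut self, delta: u32) -> Option<u32>;
    fn base(&self) -> *mut u8;
    fn bound(&self) -> usize;
}

struct View {
    base: *mut u8,
    bound: usize,
}

fn grow_and_refresh<M: Growable>(mem: &mut M, delta: u32, view: &mut View) -> i32 {
    let ret = match mem.grow(delta) {
        Some(old_pages) => old_pages as i32,
        None => -1,
    };
    // Mirror the commit: refresh even on failure; it is cheap and always sound.
    view.base = mem.base();
    view.bound = mem.bound();
    ret
}
```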
pub unsafe extern "C" fn imported_dynamic_memory_size(


@ -1201,41 +1201,6 @@ impl X64FunctionCode {
value_size: usize,
cb: F,
) {
let tmp_addr = m.acquire_temp_gpr().unwrap();
let tmp_base = m.acquire_temp_gpr().unwrap();
let tmp_bound = m.acquire_temp_gpr().unwrap();
// Loads both base and bound into temporary registers.
a.emit_mov(
Size::S64,
Location::Memory(
Machine::get_vmctx_reg(),
match MemoryIndex::new(0).local_or_import(module_info) {
LocalOrImport::Local(_) => vm::Ctx::offset_memories(),
LocalOrImport::Import(_) => vm::Ctx::offset_imported_memories(),
} as i32,
),
Location::GPR(tmp_base),
);
a.emit_mov(
Size::S64,
Location::Memory(tmp_base, 0),
Location::GPR(tmp_base),
);
a.emit_mov(
Size::S32,
Location::Memory(tmp_base, LocalMemory::offset_bound() as i32),
Location::GPR(tmp_bound),
);
a.emit_mov(
Size::S64,
Location::Memory(tmp_base, LocalMemory::offset_base() as i32),
Location::GPR(tmp_base),
);
// Adds base to bound so `tmp_bound` now holds the end of linear memory.
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_bound));
// If the memory is dynamic, we need to do bound checking at runtime.
let mem_desc = match MemoryIndex::new(0).local_or_import(module_info) {
LocalOrImport::Local(local_mem_index) => &module_info.memories[local_mem_index],
@@ -1251,8 +1216,32 @@ impl X64FunctionCode {
MemoryBoundCheckMode::Enable => true,
MemoryBoundCheckMode::Disable => false,
};
let tmp_addr = m.acquire_temp_gpr().unwrap();
let tmp_base = m.acquire_temp_gpr().unwrap();
let tmp_bound = m.acquire_temp_gpr().unwrap();
// Load base into temporary register.
a.emit_mov(
Size::S64,
Location::Memory(
Machine::get_vmctx_reg(),
vm::Ctx::offset_memory_base() as i32,
),
Location::GPR(tmp_base),
);
if need_check {
a.emit_mov(
Size::S64,
Location::Memory(
Machine::get_vmctx_reg(),
vm::Ctx::offset_memory_bound() as i32,
),
Location::GPR(tmp_bound),
);
// Adds base to bound so `tmp_bound` now holds the end of linear memory.
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_bound));
a.emit_mov(Size::S32, addr, Location::GPR(tmp_addr));
// This branch is used for emitting "faster" code for the special case of (offset + value_size) not exceeding u32 range.
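Taken together, the singlepass change replaces the old three-load sequence (memories array, then `LocalMemory`, then `base`/`bound`) with direct loads at fixed offsets from the `vmctx` register, and skips the bound load entirely when no runtime check is needed. What the emitted sequence computes, sketched in Rust; the parameters stand in for the registers and loads the backend actually emits:

```rust
/// `memory_base` and `memory_bound` stand in for the loads from
/// vmctx + offset_memory_base / offset_memory_bound shown above.
fn effective_address(
    memory_base: u64,
    memory_bound: u64,
    addr: u32,       // dynamic address operand, moved into tmp_addr
    value_size: u64, // bytes touched by the access
    need_check: bool,
) -> Option<u64> {
    let ea = memory_base + addr as u64;
    if need_check {
        // tmp_bound after the emit_add: one past the end of linear memory.
        let end = memory_base + memory_bound;
        if ea + value_size > end {
            return None; // the generated code traps instead
        }
    }
    Some(ea)
}
```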