From 7fd20e33556690f0f01380890e1e422e4d5bda96 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Tue, 29 Oct 2019 12:14:14 -0700 Subject: [PATCH 01/11] NFC: Fold variable into initializer. --- lib/llvm-backend/src/code.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index bd13e0ea2..0a8df9084 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -7316,15 +7316,13 @@ impl ModuleCodeGenerator Some(Linkage::External), ); - let signatures = Map::new(); - LLVMModuleCodeGenerator { context: Some(context), builder: Some(builder), intrinsics: Some(intrinsics), module, functions: vec![], - signatures, + signatures: Map::new(), signatures_raw: Map::new(), function_signatures: None, func_import_count: 0, From f77d9bfe3247dbbd70a7579bcfbd0f1535dc9677 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Wed, 30 Oct 2019 10:29:51 -0700 Subject: [PATCH 02/11] Initial implementation of TBAA for the LLVM backend. 
--- Cargo.lock | 4 +- lib/llvm-backend/src/code.rs | 473 +++++++++++++++++++++++++++++++++-- 2 files changed, 460 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 917724f89..1c0527cdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -514,7 +514,7 @@ dependencies = [ [[package]] name = "inkwell" version = "0.1.0" -source = "git+https://github.com/wasmerio/inkwell?branch=llvm8-0#57e192cdccd6cde6ee5ee0a7e0b280490126d5c6" +source = "git+https://github.com/wasmerio/inkwell?branch=llvm8-0#10d180807ce6e621ae13d74001bf5677b0e1f179" dependencies = [ "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "enum-methods 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -528,7 +528,7 @@ dependencies = [ [[package]] name = "inkwell_internal_macros" version = "0.1.0" -source = "git+https://github.com/wasmerio/inkwell?branch=llvm8-0#57e192cdccd6cde6ee5ee0a7e0b280490126d5c6" +source = "git+https://github.com/wasmerio/inkwell?branch=llvm8-0#10d180807ce6e621ae13d74001bf5677b0e1f179" dependencies = [ "cargo_toml 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index 0a8df9084..b5d7dd57b 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -6,8 +6,8 @@ use inkwell::{ targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine}, types::{BasicType, BasicTypeEnum, FunctionType, PointerType, VectorType}, values::{ - BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue, - VectorValue, + BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, MetadataValue, PhiValue, + PointerValue, VectorValue, }, AddressSpace, AtomicOrdering, AtomicRMWBinOp, FloatPredicate, IntPredicate, OptimizationLevel, }; @@ -558,6 +558,8 @@ fn resolve_memory_ptr( memarg: &MemoryImmediate, ptr_ty: PointerType, 
value_size: usize, + context_field_ptr_to_base_tbaa: MetadataValue, + context_field_ptr_to_bounds_tbaa: MetadataValue, ) -> Result { // Look up the memory base (as pointer) and bounds (as unsigned integer). let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics); @@ -570,6 +572,14 @@ fn resolve_memory_ptr( .build_load(ptr_to_base_ptr, "base") .into_pointer_value(); let bounds = builder.build_load(ptr_to_bounds, "bounds").into_int_value(); + let tbaa_kind = context.get_kind_id("tbaa"); + base.as_instruction_value() + .unwrap() + .set_metadata(context_field_ptr_to_base_tbaa, tbaa_kind); + bounds + .as_instruction_value() + .unwrap() + .set_metadata(context_field_ptr_to_bounds_tbaa, tbaa_kind); (base, bounds) } MemoryCache::Static { base_ptr, bounds } => (base_ptr, bounds), @@ -808,6 +818,11 @@ pub struct LLVMModuleCodeGenerator { stackmaps: Rc>, track_state: bool, target_machine: TargetMachine, + memory_tbaa: MetadataValue, + locals_tbaa: MetadataValue, + globals_tbaa: MetadataValue, + context_field_ptr_to_base_tbaa: MetadataValue, + context_field_ptr_to_bounds_tbaa: MetadataValue, } pub struct LLVMFunctionCodeGenerator { @@ -827,6 +842,11 @@ pub struct LLVMFunctionCodeGenerator { index: usize, opcode_offset: usize, track_state: bool, + memory_tbaa: MetadataValue, + locals_tbaa: MetadataValue, + globals_tbaa: MetadataValue, + context_field_ptr_to_base_tbaa: MetadataValue, + context_field_ptr_to_bounds_tbaa: MetadataValue, } impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { @@ -1528,19 +1548,27 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { Operator::GetLocal { local_index } => { let pointer_value = locals[local_index as usize]; let v = builder.build_load(pointer_value, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + v.as_instruction_value() + .unwrap() + .set_metadata(self.locals_tbaa, tbaa_kind); state.push1(v); } Operator::SetLocal { local_index } => { let pointer_value = locals[local_index as usize]; let (v, i) = 
state.pop1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); - builder.build_store(pointer_value, v); + let store = builder.build_store(pointer_value, v); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.locals_tbaa, tbaa_kind); } Operator::TeeLocal { local_index } => { let pointer_value = locals[local_index as usize]; let (v, i) = state.peek1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); - builder.build_store(pointer_value, v); + let store = builder.build_store(pointer_value, v); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.locals_tbaa, tbaa_kind); } Operator::GetGlobal { global_index } => { @@ -1552,6 +1580,11 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { } GlobalCache::Mut { ptr_to_value } => { let value = builder.build_load(ptr_to_value, "global_value"); + let tbaa_kind = context.get_kind_id("tbaa"); + value + .as_instruction_value() + .unwrap() + .set_metadata(self.locals_tbaa, tbaa_kind); state.push1(value); } } @@ -1563,7 +1596,9 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { let global_cache = ctx.global_cache(index, intrinsics); match global_cache { GlobalCache::Mut { ptr_to_value } => { - builder.build_store(ptr_to_value, value); + let store = builder.build_store(ptr_to_value, value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.globals_tbaa, tbaa_kind); } GlobalCache::Const { value: _ } => { return Err(CodegenError { @@ -4360,8 +4395,15 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load { ref memarg } => { @@ 
-4375,8 +4417,15 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::F32Load { ref memarg } => { @@ -4390,8 +4439,15 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.f32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::F64Load { ref memarg } => { @@ -4405,8 +4461,15 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.f64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::V128Load { ref memarg } => { @@ -4420,8 +4483,15 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i128_ptr_ty, 16, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } @@ -4437,8 +4507,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + 
self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; - builder.build_store(effective_address, value); + let store = builder.build_store(effective_address, value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I64Store { ref memarg } => { let value = state.pop1()?; @@ -4452,8 +4526,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; - builder.build_store(effective_address, value); + let store = builder.build_store(effective_address, value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::F32Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4468,8 +4546,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.f32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; - builder.build_store(effective_address, v); + let store = builder.build_store(effective_address, v); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::F64Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4484,8 +4566,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.f64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; - builder.build_store(effective_address, v); + let store = builder.build_store(effective_address, v); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::V128Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4500,10 +4586,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i128_ptr_ty, 16, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; - 
builder.build_store(effective_address, v); + let store = builder.build_store(effective_address, v); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } - Operator::I32Load8S { ref memarg } => { let effective_address = resolve_memory_ptr( builder, @@ -4515,12 +4604,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_s_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I32Load16S { ref memarg } => { @@ -4534,12 +4630,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_s_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load8S { ref memarg } => { @@ -4553,12 +4656,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + 
.set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load16S { ref memarg } => { @@ -4572,12 +4682,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load32S { ref memarg } => { @@ -4591,12 +4708,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } @@ -4611,12 +4735,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I32Load16U { ref memarg } => { @@ -4630,12 +4761,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + 
self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load8U { ref memarg } => { @@ -4649,12 +4787,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load16U { ref memarg } => { @@ -4668,12 +4813,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load32U { ref memarg } => { @@ -4687,12 +4839,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); let result = 
builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); + let tbaa_kind = context.get_kind_id("tbaa"); + result + .as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } @@ -4708,10 +4867,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i8_ty, &state.var_name()); - builder.build_store(effective_address, narrow_value); + let store = builder.build_store(effective_address, narrow_value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I32Store16 { ref memarg } | Operator::I64Store16 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -4725,10 +4888,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i16_ty, &state.var_name()); - builder.build_store(effective_address, narrow_value); + let store = builder.build_store(effective_address, narrow_value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I64Store32 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -4742,10 +4909,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i32_ty, &state.var_name()); - builder.build_store(effective_address, narrow_value); + let store = builder.build_store(effective_address, narrow_value); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); 
} Operator::I8x16Neg => { let (v, i) = state.pop1_extra()?; @@ -5046,8 +5217,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let elem = builder.build_load(effective_address, "").into_int_value(); + let tbaa_kind = context.get_kind_id("tbaa"); + elem.as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); let res = splat_vector( builder, intrinsics, @@ -5069,8 +5246,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let elem = builder.build_load(effective_address, "").into_int_value(); + let tbaa_kind = context.get_kind_id("tbaa"); + elem.as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); let res = splat_vector( builder, intrinsics, @@ -5092,8 +5275,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let elem = builder.build_load(effective_address, "").into_int_value(); + let tbaa_kind = context.get_kind_id("tbaa"); + elem.as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); let res = splat_vector( builder, intrinsics, @@ -5115,8 +5304,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; let elem = builder.build_load(effective_address, "").into_int_value(); + let tbaa_kind = context.get_kind_id("tbaa"); + elem.as_instruction_value() + .unwrap() + .set_metadata(self.memory_tbaa, tbaa_kind); let res = splat_vector( builder, intrinsics, @@ -5147,6 +5342,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + 
self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5161,6 +5358,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(4).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64AtomicLoad { ref memarg } => { @@ -5174,6 +5373,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5188,6 +5389,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(8).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I32AtomicLoad8U { ref memarg } => { @@ -5201,6 +5404,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5217,6 +5422,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(1).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); state.push1(result); @@ -5232,6 +5439,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5248,6 +5457,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { 
load.set_alignment(2).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); state.push1(result); @@ -5263,6 +5474,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5279,6 +5492,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(1).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5294,6 +5509,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5310,6 +5527,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(2).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5325,6 +5544,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5341,6 +5562,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(4).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let 
tbaa_kind = context.get_kind_id("tbaa"); + load.set_metadata(self.memory_tbaa, tbaa_kind); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5357,6 +5580,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5371,6 +5596,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I64AtomicStore { ref memarg } => { let value = state.pop1()?; @@ -5384,6 +5611,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5398,6 +5627,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I32AtomicStore8 { ref memarg } | Operator::I64AtomicStore8 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5411,6 +5642,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5427,6 +5660,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I32AtomicStore16 { ref memarg } | Operator::I64AtomicStore16 { ref memarg } => { @@ -5441,6 +5676,8 @@ impl 
FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5457,6 +5694,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I64AtomicStore32 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5470,6 +5709,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5486,6 +5727,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); + let tbaa_kind = context.get_kind_id("tbaa"); + store.set_metadata(self.memory_tbaa, tbaa_kind); } Operator::I32AtomicRmw8UAdd { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5499,6 +5742,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5533,6 +5778,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5567,6 +5814,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5598,6 +5847,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + 
self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5632,6 +5883,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5666,6 +5919,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5700,6 +5955,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5731,6 +5988,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5765,6 +6024,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5799,6 +6060,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5830,6 +6093,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5864,6 +6129,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5898,6 +6165,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, 
intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5932,6 +6201,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5963,6 +6234,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5997,6 +6270,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6031,6 +6306,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6062,6 +6339,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6096,6 +6375,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6130,6 +6411,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6164,6 +6447,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6195,6 +6480,8 @@ impl 
FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6229,6 +6516,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6263,6 +6552,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6295,6 +6586,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6329,6 +6622,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6363,6 +6658,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6397,6 +6694,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6428,6 +6727,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6462,6 +6763,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; 
trap_if_misaligned( builder, @@ -6496,6 +6799,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6527,6 +6832,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6561,6 +6868,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6595,6 +6904,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6629,6 +6940,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6660,6 +6973,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6694,6 +7009,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6728,6 +7045,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6759,6 +7078,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + 
self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6793,6 +7114,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6827,6 +7150,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6861,6 +7186,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6893,6 +7220,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6935,6 +7264,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6977,6 +7308,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7011,6 +7344,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i8_ptr_ty, 1, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7053,6 +7388,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i16_ptr_ty, 2, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7095,6 +7432,8 @@ impl FunctionCodeGenerator for 
LLVMFunctionCodeGenerator { memarg, intrinsics.i32_ptr_ty, 4, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7137,6 +7476,8 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { memarg, intrinsics.i64_ptr_ty, 8, + self.context_field_ptr_to_base_tbaa, + self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7316,6 +7657,97 @@ impl ModuleCodeGenerator Some(Linkage::External), ); + module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); + let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; + let memory = context.metadata_string("memory"); + module.add_global_metadata( + "memory", + &MetadataValue::create_node(&[memory.into(), tbaa_root.into()]), + ); + let memory_tbaa = module.get_global_metadata("memory")[0]; + module.add_global_metadata( + "memory_memop", + &MetadataValue::create_node(&[ + memory_tbaa.into(), + memory_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let memory_tbaa = module.get_global_metadata("memory_memop")[0]; + + let locals = context.metadata_string("locals"); + module.add_global_metadata( + "locals", + &MetadataValue::create_node(&[locals.into(), tbaa_root.into()]), + ); + let locals_tbaa = module.get_global_metadata("locals")[0]; + module.add_global_metadata( + "locals_memop", + &MetadataValue::create_node(&[ + locals_tbaa.into(), + locals_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let locals_tbaa = module.get_global_metadata("locals_memop")[0]; + + let globals = context.metadata_string("globals"); + module.add_global_metadata( + "globals", + &MetadataValue::create_node(&[globals.into(), tbaa_root.into()]), + ); + let globals_tbaa = module.get_global_metadata("globals")[0]; + module.add_global_metadata( + "globals_memop", + &MetadataValue::create_node(&[ + globals_tbaa.into(), + globals_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let globals_tbaa = 
module.get_global_metadata("globals_memop")[0]; + + let context_field_ptr_to_base_tbaa = + context.metadata_string("context_field_ptr_to_base_tbaa"); + module.add_global_metadata( + "context_field_ptr_to_base_tbaa", + &MetadataValue::create_node(&[context_field_ptr_to_base_tbaa.into(), tbaa_root.into()]), + ); + let context_field_ptr_to_base_tbaa = + module.get_global_metadata("context_field_ptr_to_base_tbaa")[0]; + module.add_global_metadata( + "context_field_ptr_to_base_tbaa_memop", + &MetadataValue::create_node(&[ + context_field_ptr_to_base_tbaa.into(), + context_field_ptr_to_base_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let context_field_ptr_to_base_tbaa = + module.get_global_metadata("context_field_ptr_to_base_tbaa_memop")[0]; + + let context_field_ptr_to_bounds_tbaa = + context.metadata_string("context_field_ptr_to_bounds_tbaa"); + module.add_global_metadata( + "context_field_ptr_to_bounds_tbaa", + &MetadataValue::create_node(&[ + context_field_ptr_to_bounds_tbaa.into(), + tbaa_root.into(), + ]), + ); + let context_field_ptr_to_bounds_tbaa = + module.get_global_metadata("context_field_ptr_to_bounds_tbaa")[0]; + module.add_global_metadata( + "context_field_ptr_to_bounds_tbaa_memop", + &MetadataValue::create_node(&[ + context_field_ptr_to_bounds_tbaa.into(), + context_field_ptr_to_bounds_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let context_field_ptr_to_bounds_tbaa = + module.get_global_metadata("context_field_ptr_to_bounds_tbaa_memop")[0]; + LLVMModuleCodeGenerator { context: Some(context), builder: Some(builder), @@ -7329,7 +7761,12 @@ impl ModuleCodeGenerator personality_func, stackmaps: Rc::new(RefCell::new(StackmapRegistry::default())), track_state: false, - target_machine: target_machine, + target_machine, + memory_tbaa, + locals_tbaa, + globals_tbaa, + context_field_ptr_to_base_tbaa, + context_field_ptr_to_bounds_tbaa, } } @@ -7436,6 +7873,11 @@ impl ModuleCodeGenerator index: local_func_index, opcode_offset: 0, track_state: 
self.track_state, + memory_tbaa: self.memory_tbaa, + locals_tbaa: self.locals_tbaa, + globals_tbaa: self.globals_tbaa, + context_field_ptr_to_base_tbaa: self.context_field_ptr_to_base_tbaa, + context_field_ptr_to_bounds_tbaa: self.context_field_ptr_to_bounds_tbaa, }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) @@ -7485,6 +7927,7 @@ impl ModuleCodeGenerator pass_manager.add_scalar_repl_aggregates_pass(); pass_manager.add_instruction_combining_pass(); pass_manager.add_cfg_simplification_pass(); + pass_manager.add_type_based_alias_analysis_pass(); pass_manager.add_gvn_pass(); pass_manager.add_jump_threading_pass(); pass_manager.add_correlated_value_propagation_pass(); From e7d1742c63bbba7a3091eb397f16c34132f0735f Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Wed, 30 Oct 2019 13:11:29 -0700 Subject: [PATCH 03/11] Update module to be held by Rc&lt;RefCell&lt;Module&gt;&gt; so that we can pass it to LLVMFunctionCodeGenerator. Use that to generate distinct TBAA labels for distinct local variables. 
--- lib/llvm-backend/src/backend.rs | 6 ++- lib/llvm-backend/src/code.rs | 73 +++++++++++++++++++++++++-------- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/lib/llvm-backend/src/backend.rs b/lib/llvm-backend/src/backend.rs index f7847f063..48a227cc8 100644 --- a/lib/llvm-backend/src/backend.rs +++ b/lib/llvm-backend/src/backend.rs @@ -9,12 +9,14 @@ use inkwell::{ use libc::c_char; use std::{ any::Any, + cell::RefCell, ffi::{c_void, CString}, fs::File, io::Write, mem, ops::Deref, ptr::{self, NonNull}, + rc::Rc, slice, str, sync::{Arc, Once}, }; @@ -167,14 +169,14 @@ pub struct LLVMBackend { impl LLVMBackend { pub fn new( - module: Module, + module: Rc>, _intrinsics: Intrinsics, _stackmaps: &StackmapRegistry, _module_info: &ModuleInfo, target_machine: &TargetMachine, ) -> (Self, LLVMCache) { let memory_buffer = target_machine - .write_to_memory_buffer(&module, FileType::Object) + .write_to_memory_buffer(&module.borrow_mut(), FileType::Object) .unwrap(); let mem_buf_slice = memory_buffer.as_slice(); diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index b5d7dd57b..d2b03ea8e 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -6,8 +6,8 @@ use inkwell::{ targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine}, types::{BasicType, BasicTypeEnum, FunctionType, PointerType, VectorType}, values::{ - BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, MetadataValue, PhiValue, - PointerValue, VectorValue, + BasicValue, BasicValueEnum, FloatValue, FunctionValue, InstructionValue, IntValue, + MetadataValue, PhiValue, PointerValue, VectorValue, }, AddressSpace, AtomicOrdering, AtomicRMWBinOp, FloatPredicate, IntPredicate, OptimizationLevel, }; @@ -647,6 +647,41 @@ fn resolve_memory_ptr( .into_pointer_value()) } +fn local_tbaa( + module: Rc>, + intrinsics: &Intrinsics, + instruction: InstructionValue, + index: u32, +) { + let module = module.borrow_mut(); + let context = 
module.get_context(); + + module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); + let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; + + let name = format!("local {}", index); + let local = context.metadata_string(name.as_str()); + module.add_global_metadata( + name.as_str(), + &MetadataValue::create_node(&[local.into(), tbaa_root.into()]), + ); + let local_tbaa = module.get_global_metadata(name.as_str())[0]; + + let name = name + "_memop"; + module.add_global_metadata( + name.as_str(), + &MetadataValue::create_node(&[ + local_tbaa.into(), + local_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let local_tbaa = module.get_global_metadata(name.as_str())[0]; + + let tbaa_kind = context.get_kind_id("tbaa"); + instruction.set_metadata(local_tbaa, tbaa_kind); +} + fn emit_stack_map( _module_info: &ModuleInfo, intrinsics: &Intrinsics, @@ -814,7 +849,7 @@ pub struct LLVMModuleCodeGenerator { function_signatures: Option>>, func_import_count: usize, personality_func: FunctionValue, - module: Module, + module: Rc>, stackmaps: Rc>, track_state: bool, target_machine: TargetMachine, @@ -842,6 +877,7 @@ pub struct LLVMFunctionCodeGenerator { index: usize, opcode_offset: usize, track_state: bool, + module: Rc>, memory_tbaa: MetadataValue, locals_tbaa: MetadataValue, globals_tbaa: MetadataValue, @@ -1548,10 +1584,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { Operator::GetLocal { local_index } => { let pointer_value = locals[local_index as usize]; let v = builder.build_load(pointer_value, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - v.as_instruction_value() - .unwrap() - .set_metadata(self.locals_tbaa, tbaa_kind); + local_tbaa( + self.module.clone(), + intrinsics, + v.as_instruction_value().unwrap(), + local_index, + ); state.push1(v); } Operator::SetLocal { local_index } => { @@ -1559,16 +1597,14 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { let (v, i) = 
state.pop1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); let store = builder.build_store(pointer_value, v); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.locals_tbaa, tbaa_kind); + local_tbaa(self.module.clone(), intrinsics, store, local_index); } Operator::TeeLocal { local_index } => { let pointer_value = locals[local_index as usize]; let (v, i) = state.peek1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); let store = builder.build_store(pointer_value, v); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.locals_tbaa, tbaa_kind); + local_tbaa(self.module.clone(), intrinsics, store, local_index); } Operator::GetGlobal { global_index } => { @@ -7752,7 +7788,7 @@ impl ModuleCodeGenerator context: Some(context), builder: Some(builder), intrinsics: Some(intrinsics), - module, + module: Rc::new(RefCell::new(module)), functions: vec![], signatures: Map::new(), signatures_raw: Map::new(), @@ -7800,7 +7836,7 @@ impl ModuleCodeGenerator [FuncIndex::new(self.func_import_count + self.functions.len())]; let func_sig = self.signatures_raw[sig_id].clone(); - let function = self.module.add_function( + let function = self.module.borrow_mut().add_function( &format!("fn{}", self.func_import_count + self.functions.len()), self.signatures[sig_id], Some(Linkage::External), @@ -7873,6 +7909,7 @@ impl ModuleCodeGenerator index: local_func_index, opcode_offset: 0, track_state: self.track_state, + module: self.module.clone(), memory_tbaa: self.memory_tbaa, locals_tbaa: self.locals_tbaa, globals_tbaa: self.globals_tbaa, @@ -7906,7 +7943,7 @@ impl ModuleCodeGenerator generate_trampolines( module_info, &self.signatures, - &self.module, + &self.module.borrow_mut(), self.context.as_ref().unwrap(), self.builder.as_ref().unwrap(), self.intrinsics.as_ref().unwrap(), @@ -7916,7 +7953,7 @@ impl ModuleCodeGenerator })?; if let Some(path) = unsafe { &crate::GLOBAL_OPTIONS.pre_opt_ir } { - 
self.module.print_to_file(path).unwrap(); + self.module.borrow_mut().print_to_file(path).unwrap(); } let pass_manager = PassManager::create(()); @@ -7937,16 +7974,16 @@ impl ModuleCodeGenerator pass_manager.add_cfg_simplification_pass(); pass_manager.add_bit_tracking_dce_pass(); pass_manager.add_slp_vectorize_pass(); - pass_manager.run_on(&self.module); + pass_manager.run_on(&*self.module.borrow_mut()); if let Some(path) = unsafe { &crate::GLOBAL_OPTIONS.post_opt_ir } { - self.module.print_to_file(path).unwrap(); + self.module.borrow_mut().print_to_file(path).unwrap(); } let stackmaps = self.stackmaps.borrow(); let (backend, cache_gen) = LLVMBackend::new( - self.module, + self.module.clone(), self.intrinsics.take().unwrap(), &*stackmaps, module_info, From 74eaec968e5984189c63dee18d1a33170f34fa87 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Wed, 30 Oct 2019 14:05:11 -0700 Subject: [PATCH 04/11] Remove all *_tbaa fields from LLVMModuleCodeGenerator and LLVMFunctionCodeGenerator. --- lib/llvm-backend/src/code.rs | 784 ++++++++++++++--------------------- 1 file changed, 319 insertions(+), 465 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index d2b03ea8e..103d27429 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -552,14 +552,13 @@ fn resolve_memory_ptr( builder: &Builder, intrinsics: &Intrinsics, context: &Context, + module: Rc>, function: &FunctionValue, state: &mut State, ctx: &mut CtxType, memarg: &MemoryImmediate, ptr_ty: PointerType, value_size: usize, - context_field_ptr_to_base_tbaa: MetadataValue, - context_field_ptr_to_bounds_tbaa: MetadataValue, ) -> Result { // Look up the memory base (as pointer) and bounds (as unsigned integer). 
let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics); @@ -572,14 +571,20 @@ fn resolve_memory_ptr( .build_load(ptr_to_base_ptr, "base") .into_pointer_value(); let bounds = builder.build_load(ptr_to_bounds, "bounds").into_int_value(); - let tbaa_kind = context.get_kind_id("tbaa"); - base.as_instruction_value() - .unwrap() - .set_metadata(context_field_ptr_to_base_tbaa, tbaa_kind); - bounds - .as_instruction_value() - .unwrap() - .set_metadata(context_field_ptr_to_bounds_tbaa, tbaa_kind); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_base", + base.as_instruction_value().unwrap(), + None, + ); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_bounds", + bounds.as_instruction_value().unwrap(), + None, + ); (base, bounds) } MemoryCache::Static { base_ptr, bounds } => (base_ptr, bounds), @@ -647,11 +652,12 @@ fn resolve_memory_ptr( .into_pointer_value()) } -fn local_tbaa( +fn tbaa_label( module: Rc>, intrinsics: &Intrinsics, + label: &str, instruction: InstructionValue, - index: u32, + index: Option, ) { let module = module.borrow_mut(); let context = module.get_context(); @@ -659,27 +665,31 @@ fn local_tbaa( module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; - let name = format!("local {}", index); - let local = context.metadata_string(name.as_str()); + let label = if let Some(idx) = index { + format!("{}{}", label, idx) + } else { + label.to_string() + }; + let type_label = context.metadata_string(label.as_str()); module.add_global_metadata( - name.as_str(), - &MetadataValue::create_node(&[local.into(), tbaa_root.into()]), + label.as_str(), + &MetadataValue::create_node(&[type_label.into(), tbaa_root.into()]), ); - let local_tbaa = module.get_global_metadata(name.as_str())[0]; + let type_tbaa = module.get_global_metadata(label.as_str())[0]; - let name = name + "_memop"; + let label = label + "_memop"; 
module.add_global_metadata( - name.as_str(), + label.as_str(), &MetadataValue::create_node(&[ - local_tbaa.into(), - local_tbaa.into(), + type_tbaa.into(), + type_tbaa.into(), intrinsics.i64_zero.into(), ]), ); - let local_tbaa = module.get_global_metadata(name.as_str())[0]; + let type_tbaa = module.get_global_metadata(label.as_str())[0]; let tbaa_kind = context.get_kind_id("tbaa"); - instruction.set_metadata(local_tbaa, tbaa_kind); + instruction.set_metadata(type_tbaa, tbaa_kind); } fn emit_stack_map( @@ -853,11 +863,6 @@ pub struct LLVMModuleCodeGenerator { stackmaps: Rc>, track_state: bool, target_machine: TargetMachine, - memory_tbaa: MetadataValue, - locals_tbaa: MetadataValue, - globals_tbaa: MetadataValue, - context_field_ptr_to_base_tbaa: MetadataValue, - context_field_ptr_to_bounds_tbaa: MetadataValue, } pub struct LLVMFunctionCodeGenerator { @@ -878,11 +883,6 @@ pub struct LLVMFunctionCodeGenerator { opcode_offset: usize, track_state: bool, module: Rc>, - memory_tbaa: MetadataValue, - locals_tbaa: MetadataValue, - globals_tbaa: MetadataValue, - context_field_ptr_to_base_tbaa: MetadataValue, - context_field_ptr_to_bounds_tbaa: MetadataValue, } impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { @@ -1584,11 +1584,12 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { Operator::GetLocal { local_index } => { let pointer_value = locals[local_index as usize]; let v = builder.build_load(pointer_value, &state.var_name()); - local_tbaa( + tbaa_label( self.module.clone(), intrinsics, + "local", v.as_instruction_value().unwrap(), - local_index, + Some(local_index), ); state.push1(v); } @@ -1597,14 +1598,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { let (v, i) = state.pop1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); let store = builder.build_store(pointer_value, v); - local_tbaa(self.module.clone(), intrinsics, store, local_index); + tbaa_label( + self.module.clone(), + intrinsics, + "local", + 
store, + Some(local_index), + ); } Operator::TeeLocal { local_index } => { let pointer_value = locals[local_index as usize]; let (v, i) = state.peek1_extra()?; let v = apply_pending_canonicalization(builder, intrinsics, v, i); let store = builder.build_store(pointer_value, v); - local_tbaa(self.module.clone(), intrinsics, store, local_index); + tbaa_label( + self.module.clone(), + intrinsics, + "local", + store, + Some(local_index), + ); } Operator::GetGlobal { global_index } => { @@ -1616,11 +1629,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { } GlobalCache::Mut { ptr_to_value } => { let value = builder.build_load(ptr_to_value, "global_value"); - let tbaa_kind = context.get_kind_id("tbaa"); - value - .as_instruction_value() - .unwrap() - .set_metadata(self.locals_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "global", + value.as_instruction_value().unwrap(), + Some(global_index), + ); state.push1(value); } } @@ -1633,8 +1648,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { match global_cache { GlobalCache::Mut { ptr_to_value } => { let store = builder.build_store(ptr_to_value, value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.globals_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "global", + store, + Some(global_index), + ); } GlobalCache::Const { value: _ } => { return Err(CodegenError { @@ -4425,21 +4445,22 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + 
result.as_instruction_value().unwrap(), + Some(0), + ); state.push1(result); } Operator::I64Load { ref memarg } => { @@ -4447,21 +4468,22 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + result.as_instruction_value().unwrap(), + Some(0), + ); state.push1(result); } Operator::F32Load { ref memarg } => { @@ -4469,21 +4491,22 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.f32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + result.as_instruction_value().unwrap(), + Some(0), + ); state.push1(result); } Operator::F64Load { ref memarg } => { @@ -4491,21 +4514,22 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.f64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label( + 
self.module.clone(), + intrinsics, + "memory", + result.as_instruction_value().unwrap(), + Some(0), + ); state.push1(result); } Operator::V128Load { ref memarg } => { @@ -4513,21 +4537,22 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i128_ptr_ty, 16, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let result = builder.build_load(effective_address, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + result.as_instruction_value().unwrap(), + Some(0), + ); state.push1(result); } @@ -4537,18 +4562,16 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let store = builder.build_store(effective_address, value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I64Store { ref memarg } => { let value = state.pop1()?; @@ -4556,18 +4579,16 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let store = builder.build_store(effective_address, value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::F32Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4576,18 
+4597,16 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.f32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let store = builder.build_store(effective_address, v); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::F64Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4596,18 +4615,16 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.f64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let store = builder.build_store(effective_address, v); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::V128Store { ref memarg } => { let (v, i) = state.pop1_extra()?; @@ -4616,43 +4633,42 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i128_ptr_ty, 16, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let store = builder.build_store(effective_address, v); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I32Load8S { ref memarg } => { let effective_address = resolve_memory_ptr( builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder 
.build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_s_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I32Load16S { ref memarg } => { @@ -4660,25 +4676,27 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; - let narrow_result = builder - .build_load(effective_address, &state.var_name()) - .into_int_value(); - let result = - builder.build_int_s_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + let narrow_result = builder.build_load(effective_address, &state.var_name()); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); + let result = builder.build_int_s_extend( + narrow_result.into_int_value(), + intrinsics.i32_ty, + &state.var_name(), + ); state.push1(result); } Operator::I64Load8S { ref memarg } => { @@ -4686,25 +4704,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + 
narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load16S { ref memarg } => { @@ -4712,25 +4731,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load32S { ref memarg } => { @@ -4738,25 +4758,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_s_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } @@ -4765,25 +4786,26 @@ impl FunctionCodeGenerator for 
LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I32Load16U { ref memarg } => { @@ -4791,25 +4813,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load8U { ref memarg } => { @@ -4817,25 +4840,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + 
"memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load16U { ref memarg } => { @@ -4843,25 +4867,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } Operator::I64Load32U { ref memarg } => { @@ -4869,25 +4894,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_result = builder .build_load(effective_address, &state.var_name()) .into_int_value(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + narrow_result.as_instruction_value().unwrap(), + Some(0), + ); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); - let tbaa_kind = context.get_kind_id("tbaa"); - result - .as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); state.push1(result); } @@ -4897,20 +4923,18 @@ impl 
FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i8_ty, &state.var_name()); let store = builder.build_store(effective_address, narrow_value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I32Store16 { ref memarg } | Operator::I64Store16 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -4918,20 +4942,18 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i16_ty, &state.var_name()); let store = builder.build_store(effective_address, narrow_value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I64Store32 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -4939,20 +4961,18 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; let narrow_value = builder.build_int_truncate(value, intrinsics.i32_ty, &state.var_name()); let store = builder.build_store(effective_address, narrow_value); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), 
intrinsics, "memory", store, Some(0)); } Operator::I8x16Neg => { let (v, i) = state.pop1_extra()?; @@ -5247,24 +5267,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; - let elem = builder.build_load(effective_address, "").into_int_value(); - let tbaa_kind = context.get_kind_id("tbaa"); - elem.as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + let elem = builder.build_load(effective_address, ""); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + elem.as_instruction_value().unwrap(), + Some(0), + ); let res = splat_vector( builder, intrinsics, - elem.as_basic_value_enum(), + elem, intrinsics.i8x16_ty, &state.var_name(), ); @@ -5276,24 +5298,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; - let elem = builder.build_load(effective_address, "").into_int_value(); - let tbaa_kind = context.get_kind_id("tbaa"); - elem.as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + let elem = builder.build_load(effective_address, ""); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + elem.as_instruction_value().unwrap(), + Some(0), + ); let res = splat_vector( builder, intrinsics, - elem.as_basic_value_enum(), + elem, intrinsics.i16x8_ty, &state.var_name(), ); @@ -5305,24 +5329,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; - let elem = 
builder.build_load(effective_address, "").into_int_value(); - let tbaa_kind = context.get_kind_id("tbaa"); - elem.as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + let elem = builder.build_load(effective_address, ""); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + elem.as_instruction_value().unwrap(), + Some(0), + ); let res = splat_vector( builder, intrinsics, - elem.as_basic_value_enum(), + elem, intrinsics.i32x4_ty, &state.var_name(), ); @@ -5334,24 +5360,26 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; - let elem = builder.build_load(effective_address, "").into_int_value(); - let tbaa_kind = context.get_kind_id("tbaa"); - elem.as_instruction_value() - .unwrap() - .set_metadata(self.memory_tbaa, tbaa_kind); + let elem = builder.build_load(effective_address, ""); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + elem.as_instruction_value().unwrap(), + Some(0), + ); let res = splat_vector( builder, intrinsics, - elem.as_basic_value_enum(), + elem, intrinsics.i64x2_ty, &state.var_name(), ); @@ -5372,14 +5400,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5394,8 +5421,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(4).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); state.push1(result); } 
Operator::I64AtomicLoad { ref memarg } => { @@ -5403,14 +5429,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5425,8 +5450,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(8).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); state.push1(result); } Operator::I32AtomicLoad8U { ref memarg } => { @@ -5434,14 +5458,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5458,8 +5481,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(1).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); state.push1(result); @@ -5469,14 +5491,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5493,8 +5514,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { 
load.set_alignment(2).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); let result = builder.build_int_z_extend(narrow_result, intrinsics.i32_ty, &state.var_name()); state.push1(result); @@ -5504,14 +5524,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5528,8 +5547,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(1).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5539,14 +5557,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5563,8 +5580,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(2).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5574,14 +5590,13 @@ impl 
FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5598,8 +5613,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { load.set_alignment(4).unwrap(); load.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - load.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", load, Some(0)); let result = builder.build_int_z_extend(narrow_result, intrinsics.i64_ty, &state.var_name()); state.push1(result); @@ -5610,14 +5624,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5632,8 +5645,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I64AtomicStore { ref memarg } => { let value = state.pop1()?; @@ -5641,14 +5653,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5663,8 +5674,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = 
context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I32AtomicStore8 { ref memarg } | Operator::I64AtomicStore8 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5672,14 +5682,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5696,8 +5705,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I32AtomicStore16 { ref memarg } | Operator::I64AtomicStore16 { ref memarg } => { @@ -5706,14 +5714,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5730,8 +5737,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I64AtomicStore32 { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5739,14 +5745,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - 
self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5763,8 +5768,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { store .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap(); - let tbaa_kind = context.get_kind_id("tbaa"); - store.set_metadata(self.memory_tbaa, tbaa_kind); + tbaa_label(self.module.clone(), intrinsics, "memory", store, Some(0)); } Operator::I32AtomicRmw8UAdd { ref memarg } => { let value = state.pop1()?.into_int_value(); @@ -5772,14 +5776,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5808,14 +5811,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5844,14 +5846,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5877,14 +5878,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5913,14 +5913,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, 
- self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5949,14 +5948,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -5985,14 +5983,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6018,14 +6015,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6054,14 +6050,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6090,14 +6085,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6123,14 +6117,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( 
builder, @@ -6159,14 +6152,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6195,14 +6187,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6231,14 +6222,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6264,14 +6254,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6300,14 +6289,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6336,14 +6324,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6369,14 +6356,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, 
intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6405,14 +6391,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6441,14 +6426,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6477,14 +6461,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6510,14 +6493,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6546,14 +6528,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6582,14 +6563,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, 
intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6616,14 +6596,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6652,14 +6631,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6688,14 +6666,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6724,14 +6701,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6757,14 +6733,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6793,14 +6768,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, 
)?; trap_if_misaligned( builder, @@ -6829,14 +6803,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6862,14 +6835,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6898,14 +6870,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6934,14 +6905,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -6970,14 +6940,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7003,14 +6972,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7039,14 +7007,13 @@ impl FunctionCodeGenerator for 
LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7075,14 +7042,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7108,14 +7074,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7144,14 +7109,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7180,14 +7144,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7216,14 +7179,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7250,14 +7212,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, 
&mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7294,14 +7255,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7338,14 +7298,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7374,14 +7333,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i8_ptr_ty, 1, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7418,14 +7376,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i16_ptr_ty, 2, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7462,14 +7419,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i32_ptr_ty, 4, - self.context_field_ptr_to_base_tbaa, - self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7506,14 +7462,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { builder, intrinsics, context, + self.module.clone(), &function, &mut state, &mut ctx, memarg, intrinsics.i64_ptr_ty, 8, - self.context_field_ptr_to_base_tbaa, - 
self.context_field_ptr_to_bounds_tbaa, )?; trap_if_misaligned( builder, @@ -7693,97 +7648,6 @@ impl ModuleCodeGenerator Some(Linkage::External), ); - module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); - let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; - let memory = context.metadata_string("memory"); - module.add_global_metadata( - "memory", - &MetadataValue::create_node(&[memory.into(), tbaa_root.into()]), - ); - let memory_tbaa = module.get_global_metadata("memory")[0]; - module.add_global_metadata( - "memory_memop", - &MetadataValue::create_node(&[ - memory_tbaa.into(), - memory_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let memory_tbaa = module.get_global_metadata("memory_memop")[0]; - - let locals = context.metadata_string("locals"); - module.add_global_metadata( - "locals", - &MetadataValue::create_node(&[locals.into(), tbaa_root.into()]), - ); - let locals_tbaa = module.get_global_metadata("locals")[0]; - module.add_global_metadata( - "locals_memop", - &MetadataValue::create_node(&[ - locals_tbaa.into(), - locals_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let locals_tbaa = module.get_global_metadata("locals_memop")[0]; - - let globals = context.metadata_string("globals"); - module.add_global_metadata( - "globals", - &MetadataValue::create_node(&[globals.into(), tbaa_root.into()]), - ); - let globals_tbaa = module.get_global_metadata("globals")[0]; - module.add_global_metadata( - "globals_memop", - &MetadataValue::create_node(&[ - globals_tbaa.into(), - globals_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let globals_tbaa = module.get_global_metadata("globals_memop")[0]; - - let context_field_ptr_to_base_tbaa = - context.metadata_string("context_field_ptr_to_base_tbaa"); - module.add_global_metadata( - "context_field_ptr_to_base_tbaa", - &MetadataValue::create_node(&[context_field_ptr_to_base_tbaa.into(), tbaa_root.into()]), - ); - let context_field_ptr_to_base_tbaa = - 
module.get_global_metadata("context_field_ptr_to_base_tbaa")[0]; - module.add_global_metadata( - "context_field_ptr_to_base_tbaa_memop", - &MetadataValue::create_node(&[ - context_field_ptr_to_base_tbaa.into(), - context_field_ptr_to_base_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let context_field_ptr_to_base_tbaa = - module.get_global_metadata("context_field_ptr_to_base_tbaa_memop")[0]; - - let context_field_ptr_to_bounds_tbaa = - context.metadata_string("context_field_ptr_to_bounds_tbaa"); - module.add_global_metadata( - "context_field_ptr_to_bounds_tbaa", - &MetadataValue::create_node(&[ - context_field_ptr_to_bounds_tbaa.into(), - tbaa_root.into(), - ]), - ); - let context_field_ptr_to_bounds_tbaa = - module.get_global_metadata("context_field_ptr_to_bounds_tbaa")[0]; - module.add_global_metadata( - "context_field_ptr_to_bounds_tbaa_memop", - &MetadataValue::create_node(&[ - context_field_ptr_to_bounds_tbaa.into(), - context_field_ptr_to_bounds_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let context_field_ptr_to_bounds_tbaa = - module.get_global_metadata("context_field_ptr_to_bounds_tbaa_memop")[0]; - LLVMModuleCodeGenerator { context: Some(context), builder: Some(builder), @@ -7798,11 +7662,6 @@ impl ModuleCodeGenerator stackmaps: Rc::new(RefCell::new(StackmapRegistry::default())), track_state: false, target_machine, - memory_tbaa, - locals_tbaa, - globals_tbaa, - context_field_ptr_to_base_tbaa, - context_field_ptr_to_bounds_tbaa, } } @@ -7910,11 +7769,6 @@ impl ModuleCodeGenerator opcode_offset: 0, track_state: self.track_state, module: self.module.clone(), - memory_tbaa: self.memory_tbaa, - locals_tbaa: self.locals_tbaa, - globals_tbaa: self.globals_tbaa, - context_field_ptr_to_base_tbaa: self.context_field_ptr_to_base_tbaa, - context_field_ptr_to_bounds_tbaa: self.context_field_ptr_to_bounds_tbaa, }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) From 15ce8bfda7a4965732c18d38427f41f1ca5031dc Mon Sep 17 00:00:00 
2001 From: Nick Lewycky Date: Wed, 30 Oct 2019 16:18:36 -0700 Subject: [PATCH 05/11] Label the loads in intrinsics.rs, most of which are the initial accesses off the context. Move tbaa_label to intrinsics.rs. Move TBAA pass to first in the list, it doesn't get invalidated. Add TBAA labels for internal fields. --- lib/llvm-backend/src/code.rs | 95 ++++++------- lib/llvm-backend/src/intrinsics.rs | 215 ++++++++++++++++++++++++++--- 2 files changed, 236 insertions(+), 74 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index 103d27429..c29b2e881 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -6,8 +6,8 @@ use inkwell::{ targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine}, types::{BasicType, BasicTypeEnum, FunctionType, PointerType, VectorType}, values::{ - BasicValue, BasicValueEnum, FloatValue, FunctionValue, InstructionValue, IntValue, - MetadataValue, PhiValue, PointerValue, VectorValue, + BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue, + VectorValue, }, AddressSpace, AtomicOrdering, AtomicRMWBinOp, FloatPredicate, IntPredicate, OptimizationLevel, }; @@ -29,7 +29,7 @@ use wasmer_runtime_core::{ use wasmparser::{BinaryReaderError, MemoryImmediate, Operator, Type as WpType}; use crate::backend::LLVMBackend; -use crate::intrinsics::{CtxType, GlobalCache, Intrinsics, MemoryCache}; +use crate::intrinsics::{tbaa_label, CtxType, GlobalCache, Intrinsics, MemoryCache}; use crate::read_info::{blocktype_to_type, type_to_type}; use crate::stackmap::{StackmapEntry, StackmapEntryKind, StackmapRegistry, ValueSemantic}; use crate::state::{ControlFrame, ExtraInfo, IfElseState, State}; @@ -561,7 +561,7 @@ fn resolve_memory_ptr( value_size: usize, ) -> Result { // Look up the memory base (as pointer) and bounds (as unsigned integer). 
- let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics); + let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics, module.clone()); let (mem_base, mem_bound) = match memory_cache { MemoryCache::Dynamic { ptr_to_base_ptr, @@ -652,46 +652,6 @@ fn resolve_memory_ptr( .into_pointer_value()) } -fn tbaa_label( - module: Rc>, - intrinsics: &Intrinsics, - label: &str, - instruction: InstructionValue, - index: Option, -) { - let module = module.borrow_mut(); - let context = module.get_context(); - - module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); - let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; - - let label = if let Some(idx) = index { - format!("{}{}", label, idx) - } else { - label.to_string() - }; - let type_label = context.metadata_string(label.as_str()); - module.add_global_metadata( - label.as_str(), - &MetadataValue::create_node(&[type_label.into(), tbaa_root.into()]), - ); - let type_tbaa = module.get_global_metadata(label.as_str())[0]; - - let label = label + "_memop"; - module.add_global_metadata( - label.as_str(), - &MetadataValue::create_node(&[ - type_tbaa.into(), - type_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let type_tbaa = module.get_global_metadata(label.as_str())[0]; - - let tbaa_kind = context.get_kind_id("tbaa"); - instruction.set_metadata(type_tbaa, tbaa_kind); -} - fn emit_stack_map( _module_info: &ModuleInfo, intrinsics: &Intrinsics, @@ -1027,17 +987,33 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { InternalEvent::GetInternal(idx) => { if state.reachable { let idx = idx as usize; - let field_ptr = ctx.internal_field(idx, intrinsics, builder); + let field_ptr = + ctx.internal_field(idx, intrinsics, self.module.clone(), builder); let result = builder.build_load(field_ptr, "get_internal"); + tbaa_label( + self.module.clone(), + intrinsics, + "internal", + result.as_instruction_value().unwrap(), + Some(idx as u32), + ); state.push1(result); } } 
InternalEvent::SetInternal(idx) => { if state.reachable { let idx = idx as usize; - let field_ptr = ctx.internal_field(idx, intrinsics, builder); + let field_ptr = + ctx.internal_field(idx, intrinsics, self.module.clone(), builder); let v = state.pop1()?; - builder.build_store(field_ptr, v); + let store = builder.build_store(field_ptr, v); + tbaa_label( + self.module.clone(), + intrinsics, + "internal", + store, + Some(idx as u32), + ); } } } @@ -1622,7 +1598,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { Operator::GetGlobal { global_index } => { let index = GlobalIndex::new(global_index as usize); - let global_cache = ctx.global_cache(index, intrinsics); + let global_cache = ctx.global_cache(index, intrinsics, self.module.clone()); match global_cache { GlobalCache::Const { value } => { state.push1(value); @@ -1644,7 +1620,7 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { let (value, info) = state.pop1_extra()?; let value = apply_pending_canonicalization(builder, intrinsics, value, info); let index = GlobalIndex::new(global_index as usize); - let global_cache = ctx.global_cache(index, intrinsics); + let global_cache = ctx.global_cache(index, intrinsics, self.module.clone()); match global_cache { GlobalCache::Mut { ptr_to_value } => { let store = builder.build_store(ptr_to_value, value); @@ -1712,14 +1688,19 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { ) .collect(); - let func_ptr = - ctx.local_func(local_func_index, llvm_sig, intrinsics, builder); + let func_ptr = ctx.local_func( + local_func_index, + llvm_sig, + intrinsics, + self.module.clone(), + builder, + ); (params, func_ptr) } LocalOrImport::Import(import_func_index) => { let (func_ptr_untyped, ctx_ptr) = - ctx.imported_func(import_func_index, intrinsics); + ctx.imported_func(import_func_index, intrinsics, self.module.clone()); let params: Vec<_> = std::iter::once(ctx_ptr.as_basic_value_enum()) .chain( @@ -1822,8 +1803,12 @@ impl FunctionCodeGenerator for 
LLVMFunctionCodeGenerator { Operator::CallIndirect { index, table_index } => { let sig_index = SigIndex::new(index as usize); let expected_dynamic_sigindex = ctx.dynamic_sigindex(sig_index, intrinsics); - let (table_base, table_bound) = - ctx.table(TableIndex::new(table_index as usize), intrinsics, builder); + let (table_base, table_bound) = ctx.table( + TableIndex::new(table_index as usize), + intrinsics, + self.module.clone(), + builder, + ); let func_index = state.pop1()?.into_int_value(); // We assume the table has the `anyfunc` element type. @@ -7814,11 +7799,11 @@ impl ModuleCodeGenerator if cfg!(test) { pass_manager.add_verifier_pass(); } + pass_manager.add_type_based_alias_analysis_pass(); pass_manager.add_lower_expect_intrinsic_pass(); pass_manager.add_scalar_repl_aggregates_pass(); pass_manager.add_instruction_combining_pass(); pass_manager.add_cfg_simplification_pass(); - pass_manager.add_type_based_alias_analysis_pass(); pass_manager.add_gvn_pass(); pass_manager.add_jump_threading_pass(); pass_manager.add_correlated_value_propagation_pass(); diff --git a/lib/llvm-backend/src/intrinsics.rs b/lib/llvm-backend/src/intrinsics.rs index 36c3b7081..4ec87134b 100644 --- a/lib/llvm-backend/src/intrinsics.rs +++ b/lib/llvm-backend/src/intrinsics.rs @@ -7,12 +7,15 @@ use inkwell::{ BasicType, FloatType, FunctionType, IntType, PointerType, StructType, VectorType, VoidType, }, values::{ - BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PointerValue, VectorValue, + BasicValue, BasicValueEnum, FloatValue, FunctionValue, InstructionValue, IntValue, + MetadataValue, PointerValue, VectorValue, }, AddressSpace, }; +use std::cell::RefCell; use std::collections::HashMap; use std::marker::PhantomData; +use std::rc::Rc; use wasmer_runtime_core::{ memory::MemoryType, module::ModuleInfo, @@ -653,7 +656,12 @@ impl<'a> CtxType<'a> { ptr } - pub fn memory(&mut self, index: MemoryIndex, intrinsics: &Intrinsics) -> MemoryCache { + pub fn memory( + &mut self, + 
index: MemoryIndex, + intrinsics: &Intrinsics, + module: Rc>, + ) -> MemoryCache { let (cached_memories, info, ctx_ptr_value, cache_builder) = ( &mut self.cached_memories, self.info, @@ -690,6 +698,13 @@ impl<'a> CtxType<'a> { let memory_array_ptr = cache_builder .build_load(memory_array_ptr_ptr, "memory_array_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "memory_array", + memory_array_ptr.as_instruction_value().unwrap(), + None, + ); let const_index = intrinsics.i32_ty.const_int(index, false); let memory_ptr_ptr = unsafe { cache_builder.build_in_bounds_gep( @@ -701,6 +716,13 @@ impl<'a> CtxType<'a> { let memory_ptr = cache_builder .build_load(memory_ptr_ptr, "memory_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "memory_ptr", + memory_ptr.as_instruction_value().unwrap(), + Some(index as u32), + ); let (ptr_to_base_ptr, ptr_to_bounds) = unsafe { ( @@ -714,14 +736,29 @@ impl<'a> CtxType<'a> { ptr_to_base_ptr, ptr_to_bounds, }, - MemoryType::Static | MemoryType::SharedStatic => MemoryCache::Static { - base_ptr: cache_builder + MemoryType::Static | MemoryType::SharedStatic => { + let base_ptr = cache_builder .build_load(ptr_to_base_ptr, "base") - .into_pointer_value(), - bounds: cache_builder + .into_pointer_value(); + let bounds = cache_builder .build_load(ptr_to_bounds, "bounds") - .into_int_value(), - }, + .into_int_value(); + tbaa_label( + module.clone(), + intrinsics, + "memory_base", + base_ptr.as_instruction_value().unwrap(), + Some(index as u32), + ); + tbaa_label( + module.clone(), + intrinsics, + "memory_bounds", + bounds.as_instruction_value().unwrap(), + Some(index as u32), + ); + MemoryCache::Static { base_ptr, bounds } + } } }) } @@ -730,6 +767,7 @@ impl<'a> CtxType<'a> { &mut self, index: TableIndex, intrinsics: &Intrinsics, + module: Rc>, ) -> (PointerValue, PointerValue) { let (cached_tables, info, ctx_ptr_value, cache_builder) = ( &mut self.cached_tables, @@ -768,6 +806,13 @@ impl<'a> 
CtxType<'a> { let table_array_ptr = cache_builder .build_load(table_array_ptr_ptr, "table_array_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_tables", + table_array_ptr.as_instruction_value().unwrap(), + None, + ); let const_index = intrinsics.i32_ty.const_int(index, false); let table_ptr_ptr = unsafe { cache_builder.build_in_bounds_gep(table_array_ptr, &[const_index], "table_ptr_ptr") @@ -775,6 +820,13 @@ impl<'a> CtxType<'a> { let table_ptr = cache_builder .build_load(table_ptr_ptr, "table_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "table_ptr", + table_array_ptr.as_instruction_value().unwrap(), + Some(index as u32), + ); let (ptr_to_base_ptr, ptr_to_bounds) = unsafe { ( @@ -796,15 +848,30 @@ impl<'a> CtxType<'a> { &mut self, index: TableIndex, intrinsics: &Intrinsics, + module: Rc>, builder: &Builder, ) -> (PointerValue, IntValue) { - let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics); - ( - builder - .build_load(ptr_to_base_ptr, "base_ptr") - .into_pointer_value(), - builder.build_load(ptr_to_bounds, "bounds").into_int_value(), - ) + let (ptr_to_base_ptr, ptr_to_bounds) = + self.table_prepare(index, intrinsics, module.clone()); + let base_ptr = builder + .build_load(ptr_to_base_ptr, "base_ptr") + .into_pointer_value(); + let bounds = builder.build_load(ptr_to_bounds, "bounds").into_int_value(); + tbaa_label( + module.clone(), + intrinsics, + "table_base_ptr", + base_ptr.as_instruction_value().unwrap(), + Some(index.index() as u32), + ); + tbaa_label( + module.clone(), + intrinsics, + "table_bounds", + bounds.as_instruction_value().unwrap(), + Some(index.index() as u32), + ); + (base_ptr, bounds) } pub fn local_func( @@ -812,6 +879,7 @@ impl<'a> CtxType<'a> { index: LocalFuncIndex, fn_ty: FunctionType, intrinsics: &Intrinsics, + module: Rc>, builder: &Builder, ) -> PointerValue { let local_func_array_ptr_ptr = unsafe { @@ -824,6 +892,13 @@ impl<'a> 
CtxType<'a> { let local_func_array_ptr = builder .build_load(local_func_array_ptr_ptr, "local_func_array_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_local_funcs", + local_func_array_ptr.as_instruction_value().unwrap(), + None, + ); let local_func_ptr_ptr = unsafe { builder.build_in_bounds_gep( local_func_array_ptr, @@ -834,6 +909,13 @@ impl<'a> CtxType<'a> { let local_func_ptr = builder .build_load(local_func_ptr_ptr, "local_func_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "local_func_ptr", + local_func_ptr.as_instruction_value().unwrap(), + Some(index.index() as u32), + ); builder.build_pointer_cast( local_func_ptr, fn_ty.ptr_type(AddressSpace::Generic), @@ -875,7 +957,12 @@ impl<'a> CtxType<'a> { }) } - pub fn global_cache(&mut self, index: GlobalIndex, intrinsics: &Intrinsics) -> GlobalCache { + pub fn global_cache( + &mut self, + index: GlobalIndex, + intrinsics: &Intrinsics, + module: Rc>, + ) -> GlobalCache { let (cached_globals, ctx_ptr_value, info, cache_builder) = ( &mut self.cached_globals, self.ctx_ptr_value, @@ -923,6 +1010,13 @@ impl<'a> CtxType<'a> { let global_array_ptr = cache_builder .build_load(globals_array_ptr_ptr, "global_array_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_globals", + globals_array_ptr_ptr.as_instruction_value().unwrap(), + None, + ); let const_index = intrinsics.i32_ty.const_int(index, false); let global_ptr_ptr = unsafe { cache_builder.build_in_bounds_gep( @@ -934,6 +1028,13 @@ impl<'a> CtxType<'a> { let global_ptr = cache_builder .build_load(global_ptr_ptr, "global_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "global_ptr", + globals_array_ptr_ptr.as_instruction_value().unwrap(), + Some(index as u32), + ); let global_ptr_typed = cache_builder.build_pointer_cast(global_ptr, llvm_ptr_ty, "global_ptr_typed"); @@ -943,9 +1044,15 @@ impl<'a> CtxType<'a> { 
ptr_to_value: global_ptr_typed, } } else { - GlobalCache::Const { - value: cache_builder.build_load(global_ptr_typed, "global_value"), - } + let value = cache_builder.build_load(global_ptr_typed, "global_value"); + tbaa_label( + module.clone(), + intrinsics, + "global", + value.as_instruction_value().unwrap(), + Some(index as u32), + ); + GlobalCache::Const { value } } }) } @@ -954,6 +1061,7 @@ impl<'a> CtxType<'a> { &mut self, index: ImportedFuncIndex, intrinsics: &Intrinsics, + module: Rc>, ) -> (PointerValue, PointerValue) { let (cached_imported_functions, ctx_ptr_value, cache_builder) = ( &mut self.cached_imported_functions, @@ -972,6 +1080,13 @@ impl<'a> CtxType<'a> { let func_array_ptr = cache_builder .build_load(func_array_ptr_ptr, "func_array_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_imported_funcs", + func_array_ptr.as_instruction_value().unwrap(), + None, + ); let const_index = intrinsics.i32_ty.const_int(index.index() as u64, false); let imported_func_ptr = unsafe { cache_builder.build_in_bounds_gep( @@ -993,6 +1108,20 @@ impl<'a> CtxType<'a> { let ctx_ptr = cache_builder .build_load(ctx_ptr_ptr, "ctx_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "imported_func_ptr", + func_ptr.as_instruction_value().unwrap(), + Some(index.index() as u32), + ); + tbaa_label( + module.clone(), + intrinsics, + "imported_func_ctx_ptr", + ctx_ptr.as_instruction_value().unwrap(), + Some(index.index() as u32), + ); ImportedFuncCache { func_ptr, ctx_ptr } }); @@ -1004,6 +1133,7 @@ impl<'a> CtxType<'a> { &mut self, index: usize, intrinsics: &Intrinsics, + module: Rc>, builder: &Builder, ) -> PointerValue { assert!(index < INTERNALS_SIZE); @@ -1018,6 +1148,13 @@ impl<'a> CtxType<'a> { let local_internals_ptr = builder .build_load(local_internals_ptr_ptr, "local_internals_ptr") .into_pointer_value(); + tbaa_label( + module.clone(), + intrinsics, + "context_field_ptr_to_internals", + 
local_internals_ptr_ptr.as_instruction_value().unwrap(), + None, + ); unsafe { builder.build_in_bounds_gep( local_internals_ptr, @@ -1027,3 +1164,43 @@ impl<'a> CtxType<'a> { } } } + +pub fn tbaa_label( + module: Rc>, + intrinsics: &Intrinsics, + label: &str, + instruction: InstructionValue, + index: Option, +) { + let module = module.borrow_mut(); + let context = module.get_context(); + + module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); + let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; + + let label = if let Some(idx) = index { + format!("{}{}", label, idx) + } else { + label.to_string() + }; + let type_label = context.metadata_string(label.as_str()); + module.add_global_metadata( + label.as_str(), + &MetadataValue::create_node(&[type_label.into(), tbaa_root.into()]), + ); + let type_tbaa = module.get_global_metadata(label.as_str())[0]; + + let label = label + "_memop"; + module.add_global_metadata( + label.as_str(), + &MetadataValue::create_node(&[ + type_tbaa.into(), + type_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + let type_tbaa = module.get_global_metadata(label.as_str())[0]; + + let tbaa_kind = context.get_kind_id("tbaa"); + instruction.set_metadata(type_tbaa, tbaa_kind); +} From d10d54a4160ac6a62e26e1c307959feff010e649 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Wed, 30 Oct 2019 16:39:39 -0700 Subject: [PATCH 06/11] Add TBAA to atomic ops. 
--- lib/llvm-backend/src/code.rs | 343 +++++++++++++++++++++++++++++++++++ 1 file changed, 343 insertions(+) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index c29b2e881..43668db39 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -5787,6 +5787,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -5822,6 +5829,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -5855,6 +5869,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I64AtomicRmw8UAdd { ref memarg } => { @@ -5889,6 +5910,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -5924,6 +5952,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, 
&state.var_name()); state.push1(old); } @@ -5959,6 +5994,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -5992,6 +6034,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8USub { ref memarg } => { @@ -6026,6 +6075,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6061,6 +6117,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6094,6 +6157,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I64AtomicRmw8USub { ref memarg } => { @@ -6128,6 +6198,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); 
let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6163,6 +6240,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6198,6 +6282,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6231,6 +6322,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8UAnd { ref memarg } => { @@ -6265,6 +6363,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6300,6 +6405,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6333,6 +6445,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + 
self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I64AtomicRmw8UAnd { ref memarg } => { @@ -6367,6 +6486,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6402,6 +6528,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6437,6 +6570,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6470,6 +6610,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8UOr { ref memarg } => { @@ -6504,6 +6651,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6539,6 +6693,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { 
AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6572,6 +6733,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6607,6 +6775,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6642,6 +6817,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6677,6 +6859,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6710,6 +6899,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8UXor { ref 
memarg } => { @@ -6744,6 +6940,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6779,6 +6982,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -6812,6 +7022,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I64AtomicRmw8UXor { ref memarg } => { @@ -6846,6 +7063,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6881,6 +7105,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6916,6 +7147,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), 
+ ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -6949,6 +7187,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8UXchg { ref memarg } => { @@ -6983,6 +7228,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -7018,6 +7270,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name()); state.push1(old); } @@ -7051,6 +7310,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I64AtomicRmw8UXchg { ref memarg } => { @@ -7085,6 +7351,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -7120,6 +7393,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + 
"memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -7155,6 +7435,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name()); state.push1(old); } @@ -7188,6 +7475,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); state.push1(old); } Operator::I32AtomicRmw8UCmpxchg { ref memarg } => { @@ -7226,6 +7520,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder .build_extract_value(old, 0, "") .unwrap() @@ -7269,6 +7570,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder .build_extract_value(old, 0, "") .unwrap() @@ -7308,6 +7616,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_extract_value(old, 0, "").unwrap(); state.push1(old); } @@ -7347,6 +7662,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + 
"memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder .build_extract_value(old, 0, "") .unwrap() @@ -7390,6 +7712,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder .build_extract_value(old, 0, "") .unwrap() @@ -7433,6 +7762,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder .build_extract_value(old, 0, "") .unwrap() @@ -7472,6 +7808,13 @@ impl FunctionCodeGenerator for LLVMFunctionCodeGenerator { AtomicOrdering::SequentiallyConsistent, ) .unwrap(); + tbaa_label( + self.module.clone(), + intrinsics, + "memory", + old.as_instruction_value().unwrap(), + Some(0), + ); let old = builder.build_extract_value(old, 0, "").unwrap(); state.push1(old); } From 88427c969659e42db76c4c885347e7622bb56135 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 31 Oct 2019 11:48:21 -0700 Subject: [PATCH 07/11] Add some comments. --- lib/llvm-backend/src/intrinsics.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/llvm-backend/src/intrinsics.rs b/lib/llvm-backend/src/intrinsics.rs index 4ec87134b..910e59746 100644 --- a/lib/llvm-backend/src/intrinsics.rs +++ b/lib/llvm-backend/src/intrinsics.rs @@ -1165,6 +1165,8 @@ impl<'a> CtxType<'a> { } } +// Given an instruction that operates on memory, mark the access as not aliasing +// other memory accesses which have a different (label, index) pair. 
pub fn tbaa_label( module: Rc>, intrinsics: &Intrinsics, @@ -1172,12 +1174,27 @@ pub fn tbaa_label( instruction: InstructionValue, index: Option, ) { + // To convey to LLVM that two pointers must be pointing to distinct memory, + // we use LLVM's Type Based Aliasing Analysis, or TBAA, to mark the memory + // operations as having different types whose pointers may not alias. + // + // See the LLVM documentation at + // https://llvm.org/docs/LangRef.html#tbaa-metadata + // + // LLVM TBAA supports many features, but we use it in a simple way, with + // only scalar types that are children of the root node. Every TBAA type we + // declare is `noalias` with the others: + // https://llvm.org/docs/AliasAnalysis.html#must-may-and-no-alias-responses + let module = module.borrow_mut(); let context = module.get_context(); + // `!wasmer_tbaa_root = {}`, the TBAA root node for wasmer. module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; + // Construct (or look up) the type descriptor, for example + // `!"local 0" = !{!"local 0", !wasmer_tbaa_root}`. let label = if let Some(idx) = index { format!("{}{}", label, idx) } else { @@ -1190,6 +1207,12 @@ pub fn tbaa_label( ); let type_tbaa = module.get_global_metadata(label.as_str())[0]; + // Construct (or look up) the access tag, which is a struct of the form + // (base type, acess type, offset). + // + // "If BaseTy is a scalar type, Offset must be 0 and BaseTy and AccessTy + // must be the same". + // -- https://llvm.org/docs/LangRef.html#tbaa-metadata let label = label + "_memop"; module.add_global_metadata( label.as_str(), @@ -1201,6 +1224,7 @@ pub fn tbaa_label( ); let type_tbaa = module.get_global_metadata(label.as_str())[0]; + // Attach the access tag to the instruction. 
let tbaa_kind = context.get_kind_id("tbaa"); instruction.set_metadata(type_tbaa, tbaa_kind); } From 0ba686ffc6c87798b23de3bcb8d5baf57a6469e3 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 31 Oct 2019 11:49:36 -0700 Subject: [PATCH 08/11] Improve wording a little. --- lib/llvm-backend/src/intrinsics.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/llvm-backend/src/intrinsics.rs b/lib/llvm-backend/src/intrinsics.rs index 910e59746..d424f9c12 100644 --- a/lib/llvm-backend/src/intrinsics.rs +++ b/lib/llvm-backend/src/intrinsics.rs @@ -1183,7 +1183,8 @@ pub fn tbaa_label( // // LLVM TBAA supports many features, but we use it in a simple way, with // only scalar types that are children of the root node. Every TBAA type we - // declare is `noalias` with the others: + // declare is NoAlias with the others. See NoAlias, PartialAlias, + // MayAlias and MustAlias in the LLVM documentation: // https://llvm.org/docs/AliasAnalysis.html#must-may-and-no-alias-responses let module = module.borrow_mut(); From ef4b3c34289211329467d9cbc6cebd5b1319d2e4 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 31 Oct 2019 12:40:32 -0700 Subject: [PATCH 09/11] Improve some TBAA label names, particularly for memory. Memory can't change between static and dynamic, so use that in the TBAA label name. Distinguish between local and imported memory, tables and globals.
--- lib/llvm-backend/src/code.rs | 8 ++-- lib/llvm-backend/src/intrinsics.rs | 69 ++++++++++++++++-------------- 2 files changed, 42 insertions(+), 35 deletions(-) diff --git a/lib/llvm-backend/src/code.rs b/lib/llvm-backend/src/code.rs index 69b2a7ad1..13e203154 100644 --- a/lib/llvm-backend/src/code.rs +++ b/lib/llvm-backend/src/code.rs @@ -577,16 +577,16 @@ fn resolve_memory_ptr( tbaa_label( module.clone(), intrinsics, - "context_field_ptr_to_base", + "dynamic_memory_base", base.as_instruction_value().unwrap(), - None, + Some(0), ); tbaa_label( module.clone(), intrinsics, - "context_field_ptr_to_bounds", + "dynamic_memory_bounds", bounds.as_instruction_value().unwrap(), - None, + Some(0), ); (base, bounds) } diff --git a/lib/llvm-backend/src/intrinsics.rs b/lib/llvm-backend/src/intrinsics.rs index d424f9c12..47d94747d 100644 --- a/lib/llvm-backend/src/intrinsics.rs +++ b/lib/llvm-backend/src/intrinsics.rs @@ -670,30 +670,33 @@ impl<'a> CtxType<'a> { ); *cached_memories.entry(index).or_insert_with(|| { - let (memory_array_ptr_ptr, index, memory_type) = match index.local_or_import(info) { - LocalOrImport::Local(local_mem_index) => ( - unsafe { - cache_builder.build_struct_gep( - ctx_ptr_value, - offset_to_index(Ctx::offset_memories()), - "memory_array_ptr_ptr", - ) - }, - local_mem_index.index() as u64, - info.memories[local_mem_index].memory_type(), - ), - LocalOrImport::Import(import_mem_index) => ( - unsafe { - cache_builder.build_struct_gep( - ctx_ptr_value, - offset_to_index(Ctx::offset_imported_memories()), - "memory_array_ptr_ptr", - ) - }, - import_mem_index.index() as u64, - info.imported_memories[import_mem_index].1.memory_type(), - ), - }; + let (memory_array_ptr_ptr, index, memory_type, field_name) = + match index.local_or_import(info) { + LocalOrImport::Local(local_mem_index) => ( + unsafe { + cache_builder.build_struct_gep( + ctx_ptr_value, + offset_to_index(Ctx::offset_memories()), + "memory_array_ptr_ptr", + ) + }, + local_mem_index.index() as u64, 
+ info.memories[local_mem_index].memory_type(), + "context_field_ptr_to_local_memory", + ), + LocalOrImport::Import(import_mem_index) => ( + unsafe { + cache_builder.build_struct_gep( + ctx_ptr_value, + offset_to_index(Ctx::offset_imported_memories()), + "memory_array_ptr_ptr", + ) + }, + import_mem_index.index() as u64, + info.imported_memories[import_mem_index].1.memory_type(), + "context_field_ptr_to_imported_memory", + ), + }; let memory_array_ptr = cache_builder .build_load(memory_array_ptr_ptr, "memory_array_ptr") @@ -701,7 +704,7 @@ impl<'a> CtxType<'a> { tbaa_label( module.clone(), intrinsics, - "memory_array", + field_name, memory_array_ptr.as_instruction_value().unwrap(), None, ); @@ -746,14 +749,14 @@ impl<'a> CtxType<'a> { tbaa_label( module.clone(), intrinsics, - "memory_base", + "static_memory_base", base_ptr.as_instruction_value().unwrap(), Some(index as u32), ); tbaa_label( module.clone(), intrinsics, - "memory_bounds", + "static_memory_bounds", bounds.as_instruction_value().unwrap(), Some(index as u32), ); @@ -780,7 +783,7 @@ impl<'a> CtxType<'a> { ptr_to_base_ptr, ptr_to_bounds, } = *cached_tables.entry(index).or_insert_with(|| { - let (table_array_ptr_ptr, index) = match index.local_or_import(info) { + let (table_array_ptr_ptr, index, field_name) = match index.local_or_import(info) { LocalOrImport::Local(local_table_index) => ( unsafe { cache_builder.build_struct_gep( @@ -790,6 +793,7 @@ impl<'a> CtxType<'a> { ) }, local_table_index.index() as u64, + "context_field_ptr_to_local_table", ), LocalOrImport::Import(import_table_index) => ( unsafe { @@ -800,6 +804,7 @@ impl<'a> CtxType<'a> { ) }, import_table_index.index() as u64, + "context_field_ptr_to_import_table", ), }; @@ -809,7 +814,7 @@ impl<'a> CtxType<'a> { tbaa_label( module.clone(), intrinsics, - "context_field_ptr_to_tables", + field_name, table_array_ptr.as_instruction_value().unwrap(), None, ); @@ -971,7 +976,7 @@ impl<'a> CtxType<'a> { ); *cached_globals.entry(index).or_insert_with(|| { 
- let (globals_array_ptr_ptr, index, mutable, wasmer_ty) = + let (globals_array_ptr_ptr, index, mutable, wasmer_ty, field_name) = match index.local_or_import(info) { LocalOrImport::Local(local_global_index) => { let desc = info.globals[local_global_index].desc; @@ -986,6 +991,7 @@ impl<'a> CtxType<'a> { local_global_index.index() as u64, desc.mutable, desc.ty, + "context_field_ptr_to_local_globals", ) } LocalOrImport::Import(import_global_index) => { @@ -1001,6 +1007,7 @@ impl<'a> CtxType<'a> { import_global_index.index() as u64, desc.mutable, desc.ty, + "context_field_ptr_to_imported_globals", ) } }; @@ -1013,7 +1020,7 @@ impl<'a> CtxType<'a> { tbaa_label( module.clone(), intrinsics, - "context_field_ptr_to_globals", + field_name, globals_array_ptr_ptr.as_instruction_value().unwrap(), None, ); From bc521a2837a39c3f4df824b88ba53f6f1fe82898 Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Thu, 31 Oct 2019 12:43:38 -0700 Subject: [PATCH 10/11] Add changelog entry. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4b97be02..afe906514 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## **[Unreleased]** +- [#921](https://github.com/wasmerio/wasmer/pull/921) In LLVM backend, annotate all memory accesses with TBAA metadata. - [#883](https://github.com/wasmerio/wasmer/pull/883) Allow floating point operations to have arbitrary inputs, even including SNaNs. - [#856](https://github.com/wasmerio/wasmer/pull/856) Expose methods in the runtime C API to get a WASI import object From 132757ee9dfea68aa0b85525590c5816347a9f6e Mon Sep 17 00:00:00 2001 From: Nick Lewycky Date: Fri, 1 Nov 2019 17:49:45 -0700 Subject: [PATCH 11/11] Fix leak. Only create one NamedMDNode for each name. 
--- lib/llvm-backend/src/intrinsics.rs | 49 +++++++++++++++++++----------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/lib/llvm-backend/src/intrinsics.rs b/lib/llvm-backend/src/intrinsics.rs index 47d94747d..173b814c3 100644 --- a/lib/llvm-backend/src/intrinsics.rs +++ b/lib/llvm-backend/src/intrinsics.rs @@ -1198,8 +1198,13 @@ pub fn tbaa_label( let context = module.get_context(); // `!wasmer_tbaa_root = {}`, the TBAA root node for wasmer. - module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); - let tbaa_root = module.get_global_metadata("wasmer_tbaa_root")[0]; + let tbaa_root = module + .get_global_metadata("wasmer_tbaa_root") + .pop() + .unwrap_or_else(|| { + module.add_global_metadata("wasmer_tbaa_root", &MetadataValue::create_node(&[])); + module.get_global_metadata("wasmer_tbaa_root")[0] + }); // Construct (or look up) the type descriptor, for example // `!"local 0" = !{!"local 0", !wasmer_tbaa_root}`. @@ -1209,28 +1214,38 @@ pub fn tbaa_label( label.to_string() }; let type_label = context.metadata_string(label.as_str()); - module.add_global_metadata( - label.as_str(), - &MetadataValue::create_node(&[type_label.into(), tbaa_root.into()]), - ); - let type_tbaa = module.get_global_metadata(label.as_str())[0]; + let type_tbaa = module + .get_global_metadata(label.as_str()) + .pop() + .unwrap_or_else(|| { + module.add_global_metadata( + label.as_str(), + &MetadataValue::create_node(&[type_label.into(), tbaa_root.into()]), + ); + module.get_global_metadata(label.as_str())[0] + }); // Construct (or look up) the access tag, which is a struct of the form - // (base type, acess type, offset). + // (base type, access type, offset). // // "If BaseTy is a scalar type, Offset must be 0 and BaseTy and AccessTy // must be the same". 
// -- https://llvm.org/docs/LangRef.html#tbaa-metadata let label = label + "_memop"; - module.add_global_metadata( - label.as_str(), - &MetadataValue::create_node(&[ - type_tbaa.into(), - type_tbaa.into(), - intrinsics.i64_zero.into(), - ]), - ); - let type_tbaa = module.get_global_metadata(label.as_str())[0]; + let type_tbaa = module + .get_global_metadata(label.as_str()) + .pop() + .unwrap_or_else(|| { + module.add_global_metadata( + label.as_str(), + &MetadataValue::create_node(&[ + type_tbaa.into(), + type_tbaa.into(), + intrinsics.i64_zero.into(), + ]), + ); + module.get_global_metadata(label.as_str())[0] + }); // Attach the access tag to the instruction. let tbaa_kind = context.get_kind_id("tbaa");