[Cranelift][Atomics] Add address folding for atomic notify/wait. (#2556)

* fold address in wasm wait and notify ops

* add atomics addr folding tests
Yury Delendik
2021-01-08 11:55:21 -06:00
committed by GitHub
parent 5de5bf1565
commit 3580205f12
8 changed files with 307 additions and 16 deletions


@@ -32,6 +32,7 @@ fn main() -> anyhow::Result<()> {
test_directory_module(out, "tests/misc_testsuite/reference-types", strategy)?;
test_directory_module(out, "tests/misc_testsuite/multi-memory", strategy)?;
test_directory_module(out, "tests/misc_testsuite/module-linking", strategy)?;
test_directory_module(out, "tests/misc_testsuite/threads", strategy)?;
Ok(())
})?;


@@ -1052,6 +1052,7 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
let timeout = state.pop1(); // 64 (fixed)
let expected = state.pop1(); // 32 or 64 (per the `Ixx` in `IxxAtomicWait`)
let addr = state.pop1(); // 32 (fixed)
let addr = fold_atomic_mem_addr(addr, memarg, implied_ty, builder);
assert!(builder.func.dfg.value_type(expected) == implied_ty);
// `fn translate_atomic_wait` can inspect the type of `expected` to figure out what
// code it needs to generate, if it wants.
@@ -1070,6 +1071,7 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
let heap = state.get_heap(builder.func, memarg.memory, environ)?;
let count = state.pop1(); // 32 (fixed)
let addr = state.pop1(); // 32 (fixed)
let addr = fold_atomic_mem_addr(addr, memarg, I32, builder);
let res =
environ.translate_atomic_notify(builder.cursor(), heap_index, heap, addr, count)?;
state.push1(res);
@@ -2142,6 +2144,42 @@ fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTran
state.push1(builder.ins().bint(I32, val));
}
fn fold_atomic_mem_addr(
linear_mem_addr: Value,
memarg: &MemoryImmediate,
access_ty: Type,
builder: &mut FunctionBuilder,
) -> Value {
let access_ty_bytes = access_ty.bytes();
let final_lma = if memarg.offset > 0 {
assert!(builder.func.dfg.value_type(linear_mem_addr) == I32);
let linear_mem_addr = builder.ins().uextend(I64, linear_mem_addr);
let a = builder
.ins()
.iadd_imm(linear_mem_addr, i64::from(memarg.offset));
let cflags = builder.ins().ifcmp_imm(a, 0x1_0000_0000i64);
builder.ins().trapif(
IntCC::UnsignedGreaterThanOrEqual,
cflags,
ir::TrapCode::HeapOutOfBounds,
);
builder.ins().ireduce(I32, a)
} else {
linear_mem_addr
};
assert!(access_ty_bytes == 4 || access_ty_bytes == 8);
let final_lma_misalignment = builder
.ins()
.band_imm(final_lma, i64::from(access_ty_bytes - 1));
let f = builder
.ins()
.ifcmp_imm(final_lma_misalignment, i64::from(0));
builder
.ins()
.trapif(IntCC::NotEqual, f, ir::TrapCode::HeapMisaligned);
final_lma
}
// For an atomic memory operation, emit an alignment check for the linear memory address, // For an atomic memory operation, emit an alignment check for the linear memory address,
// and then compute the final effective address. // and then compute the final effective address.
fn finalise_atomic_mem_addr<FE: FuncEnvironment + ?Sized>( fn finalise_atomic_mem_addr<FE: FuncEnvironment + ?Sized>(
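
For orientation, the run-time effect of fold_atomic_mem_addr can be sketched in plain Rust (illustrative only; the function name and error strings below are invented, and the real checks are emitted as Cranelift IR with the trap codes shown above): the 32-bit linear address plus the static offset must stay below 2^32, and the folded address must be naturally aligned for the 4- or 8-byte access.

// Plain-Rust sketch of what the emitted IR checks (not code from this commit).
fn fold_addr_sketch(addr: u32, offset: u32, access_bytes: u32) -> Result<u32, &'static str> {
    assert!(access_bytes == 4 || access_bytes == 8);
    let effective = addr as u64 + offset as u64; // uextend to I64, then iadd_imm
    if effective >= 1u64 << 32 {
        return Err("out of bounds"); // TrapCode::HeapOutOfBounds
    }
    if effective & (access_bytes as u64 - 1) != 0 {
        return Err("misaligned"); // TrapCode::HeapMisaligned
    }
    Ok(effective as u32) // ireduce back to I32, handed on to the wait/notify builtin
}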


@@ -81,6 +81,10 @@ macro_rules! declare_function_signatures {
AbiParam::new(I32).uext()
}
fn i64(&self) -> AbiParam {
AbiParam::new(I64)
}
$(
fn $name(&mut self, func: &mut Function) -> ir::SigRef {
let sig = self.$name.unwrap_or_else(|| {
@@ -258,6 +262,70 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
}
}
fn get_memory_atomic_notify(
&mut self,
func: &mut Function,
memory_index: MemoryIndex,
) -> (ir::SigRef, usize, BuiltinFunctionIndex) {
if let Some(defined_memory_index) = self.module.defined_memory_index(memory_index) {
(
self.builtin_function_signatures.memory_atomic_notify(func),
defined_memory_index.index(),
BuiltinFunctionIndex::memory_atomic_notify(),
)
} else {
(
self.builtin_function_signatures
.imported_memory_atomic_notify(func),
memory_index.index(),
BuiltinFunctionIndex::imported_memory_atomic_notify(),
)
}
}
fn get_memory_atomic_wait(
&mut self,
func: &mut Function,
memory_index: MemoryIndex,
ty: ir::Type,
) -> (ir::SigRef, usize, BuiltinFunctionIndex) {
match ty {
I32 => {
if let Some(defined_memory_index) = self.module.defined_memory_index(memory_index) {
(
self.builtin_function_signatures.memory_atomic_wait32(func),
defined_memory_index.index(),
BuiltinFunctionIndex::memory_atomic_wait32(),
)
} else {
(
self.builtin_function_signatures
.imported_memory_atomic_wait32(func),
memory_index.index(),
BuiltinFunctionIndex::imported_memory_atomic_wait32(),
)
}
}
I64 => {
if let Some(defined_memory_index) = self.module.defined_memory_index(memory_index) {
(
self.builtin_function_signatures.memory_atomic_wait64(func),
defined_memory_index.index(),
BuiltinFunctionIndex::memory_atomic_wait64(),
)
} else {
(
self.builtin_function_signatures
.imported_memory_atomic_wait64(func),
memory_index.index(),
BuiltinFunctionIndex::imported_memory_atomic_wait64(),
)
}
}
x => panic!("get_memory_atomic_wait unsupported type: {:?}", x),
}
}
fn get_memory_init_func(&mut self, func: &mut Function) -> (ir::SigRef, BuiltinFunctionIndex) {
(
self.builtin_function_signatures.memory_init(func),
@@ -1368,29 +1436,50 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
fn translate_atomic_wait(
&mut self,
- _pos: FuncCursor,
- _index: MemoryIndex,
+ mut pos: FuncCursor,
+ memory_index: MemoryIndex,
_heap: ir::Heap,
- _addr: ir::Value,
- _expected: ir::Value,
- _timeout: ir::Value,
+ addr: ir::Value,
+ expected: ir::Value,
+ timeout: ir::Value,
) -> WasmResult<ir::Value> {
- Err(WasmError::Unsupported(
- "wasm atomics (fn translate_atomic_wait)".to_string(),
- ))
+ let implied_ty = pos.func.dfg.value_type(expected);
+ let (func_sig, memory_index, func_idx) =
+ self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty);
+ let memory_index_arg = pos.ins().iconst(I32, memory_index as i64);
+ let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
+ let call_inst = pos.ins().call_indirect(
+ func_sig,
+ func_addr,
+ &[vmctx, memory_index_arg, addr, expected, timeout],
+ );
+ Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
}
fn translate_atomic_notify(
&mut self,
- _pos: FuncCursor,
- _index: MemoryIndex,
+ mut pos: FuncCursor,
+ memory_index: MemoryIndex,
_heap: ir::Heap,
- _addr: ir::Value,
- _count: ir::Value,
+ addr: ir::Value,
+ count: ir::Value,
) -> WasmResult<ir::Value> {
- Err(WasmError::Unsupported(
- "wasm atomics (fn translate_atomic_notify)".to_string(),
- ))
+ let (func_sig, memory_index, func_idx) =
+ self.get_memory_atomic_notify(&mut pos.func, memory_index);
+ let memory_index_arg = pos.ins().iconst(I32, memory_index as i64);
+ let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
+ let call_inst =
+ pos.ins()
+ .call_indirect(func_sig, func_addr, &[vmctx, memory_index_arg, addr, count]);
+ Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
}
fn translate_loop_header(&mut self, mut pos: FuncCursor) -> WasmResult<()> {


@@ -45,6 +45,18 @@ macro_rules! foreach_builtin_function {
externref_global_get(vmctx, i32) -> (reference);
/// Returns an index for Wasm's `global.get` instruction for `externref`s.
externref_global_set(vmctx, i32, reference) -> ();
/// Returns an index for wasm's `memory.atomic.notify` for locally defined memories.
memory_atomic_notify(vmctx, i32, i32, i32) -> (i32);
/// Returns an index for wasm's `memory.atomic.notify` for imported memories.
imported_memory_atomic_notify(vmctx, i32, i32, i32) -> (i32);
/// Returns an index for wasm's `memory.atomic.wait32` for locally defined memories.
memory_atomic_wait32(vmctx, i32, i32, i32, i64) -> (i32);
/// Returns an index for wasm's `memory.atomic.wait32` for imported memories.
imported_memory_atomic_wait32(vmctx, i32, i32, i32, i64) -> (i32);
/// Returns an index for wasm's `memory.atomic.wait64` for locally defined memories.
memory_atomic_wait64(vmctx, i32, i32, i64, i64) -> (i32);
/// Returns an index for wasm's `memory.atomic.wait64` for imported memories.
imported_memory_atomic_wait64(vmctx, i32, i32, i64, i64) -> (i32);
}
};
}
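
As a reading aid (an assumption about how these declarations are consumed, not text from this commit): each entry describes a host libcall whose Rust signature mirrors the listed types, with vmctx passed as a raw pointer. For example, the wait32 entry lines up with the stub added in libcalls.rs further down:

// Hypothetical type alias, for illustration only; VMContext is the type imported in libcalls.rs below.
// memory_atomic_wait32(vmctx, i32, i32, i32, i64) -> (i32) corresponds to:
type MemoryAtomicWait32Libcall = unsafe extern "C" fn(
    *mut VMContext, // vmctx
    u32,            // memory index
    u32,            // linear memory address
    u32,            // expected value
    u64,            // timeout
) -> u32;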


@@ -58,7 +58,7 @@
use crate::externref::VMExternRef;
use crate::table::Table;
- use crate::traphandlers::raise_lib_trap;
+ use crate::traphandlers::{raise_lib_trap, Trap};
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMContext};
use std::mem;
use std::ptr::{self, NonNull};
@@ -496,3 +496,88 @@ pub unsafe extern "C" fn wasmtime_externref_global_set(
let old = mem::replace((*global).as_externref_mut(), externref);
drop(old);
}
#[derive(Debug)]
struct Unimplemented(&'static str);
impl std::error::Error for Unimplemented {}
impl std::fmt::Display for Unimplemented {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(f, "unimplemented: {}", self.0)
}
}
/// Implementation of `memory.atomic.notify` for locally defined memories.
pub unsafe extern "C" fn wasmtime_memory_atomic_notify(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_count: u32,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_memory_atomic_notify) unsupported",
))));
}
/// Implementation of `memory.atomic.notify` for imported memories.
pub unsafe extern "C" fn wasmtime_imported_memory_atomic_notify(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_count: u32,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_imported_memory_atomic_notify) unsupported",
))));
}
/// Implementation of `memory.atomic.wait32` for locally defined memories.
pub unsafe extern "C" fn wasmtime_memory_atomic_wait32(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_expected: u32,
_timeout: u64,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_memory_atomic_wait32) unsupported",
))));
}
/// Implementation of `memory.atomic.wait32` for imported memories.
pub unsafe extern "C" fn wasmtime_imported_memory_atomic_wait32(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_expected: u32,
_timeout: u64,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_imported_memory_atomic_wait32) unsupported",
))));
}
/// Implementation of `memory.atomic.wait64` for locally defined memories.
pub unsafe extern "C" fn wasmtime_memory_atomic_wait64(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_expected: u64,
_timeout: u64,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_memory_atomic_wait32) unsupported",
))));
}
/// Implementation of `memory.atomic.wait64` for imported memories.
pub unsafe extern "C" fn wasmtime_imported_memory_atomic_wait64(
_vmctx: *mut VMContext,
_memory_index: u32,
_addr: u32,
_expected: u64,
_timeout: u64,
) -> u32 {
raise_lib_trap(Trap::User(Box::new(Unimplemented(
"wasm atomics (fn wasmtime_imported_memory_atomic_wait64) unsupported",
))));
}
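
All six stubs unconditionally raise a user trap; a hypothetical unit test inside this module (not part of the commit) shows how the Display impl above renders the payload:

// Hypothetical test illustrating the message format of the Unimplemented payload above.
#[test]
fn unimplemented_message_format() {
    let err = Unimplemented("wasm atomics (fn wasmtime_memory_atomic_notify) unsupported");
    assert_eq!(
        err.to_string(),
        "unimplemented: wasm atomics (fn wasmtime_memory_atomic_notify) unsupported"
    );
}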


@@ -600,6 +600,18 @@ impl VMBuiltinFunctionsArray {
wasmtime_table_fill as usize;
ptrs[BuiltinFunctionIndex::table_fill_funcref().index() as usize] =
wasmtime_table_fill as usize;
ptrs[BuiltinFunctionIndex::memory_atomic_notify().index() as usize] =
wasmtime_memory_atomic_notify as usize;
ptrs[BuiltinFunctionIndex::imported_memory_atomic_notify().index() as usize] =
wasmtime_imported_memory_atomic_notify as usize;
ptrs[BuiltinFunctionIndex::memory_atomic_wait32().index() as usize] =
wasmtime_memory_atomic_wait32 as usize;
ptrs[BuiltinFunctionIndex::imported_memory_atomic_wait32().index() as usize] =
wasmtime_imported_memory_atomic_wait32 as usize;
ptrs[BuiltinFunctionIndex::memory_atomic_wait64().index() as usize] =
wasmtime_memory_atomic_wait64 as usize;
ptrs[BuiltinFunctionIndex::imported_memory_atomic_wait64().index() as usize] =
wasmtime_imported_memory_atomic_wait64 as usize;
if cfg!(debug_assertions) {
for i in 0..ptrs.len() {


@@ -14,6 +14,7 @@ fn run_wast(wast: &str, strategy: Strategy) -> anyhow::Result<()> {
let multi_memory = wast.iter().any(|s| s == "multi-memory");
let module_linking = wast.iter().any(|s| s == "module-linking");
let threads = wast.iter().any(|s| s == "threads");
let bulk_mem = multi_memory || wast.iter().any(|s| s == "bulk-memory-operations");
// Some simd tests assume support for multiple tables, which are introduced
@@ -26,6 +27,7 @@ fn run_wast(wast: &str, strategy: Strategy) -> anyhow::Result<()> {
.wasm_reference_types(reftypes || module_linking)
.wasm_multi_memory(multi_memory || module_linking)
.wasm_module_linking(module_linking)
.wasm_threads(threads)
.strategy(strategy)?
.cranelift_debug_verifier(true);
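
Outside the test harness, an embedding opts in the same way; a minimal sketch (not part of this commit) using only the Config methods that appear above:

// Minimal sketch of enabling the threads proposal in an embedding, assuming the
// wasmtime crate's Config API shown in the harness above.
use wasmtime::Config;

fn main() {
    let mut config = Config::new();
    config.wasm_threads(true).cranelift_debug_verifier(true);
    // An Engine/Store built from this config lets the validator accept modules that use
    // shared memories and memory.atomic.* instructions; wait/notify still trap at run
    // time via the stubs added above.
}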


@@ -0,0 +1,52 @@
;; From https://bugzilla.mozilla.org/show_bug.cgi?id=1684861.
;;
(module
(type (;0;) (func))
(func $main (type 0)
i32.const -64
i32.const -63
memory.atomic.notify offset=1
unreachable)
(memory (;0;) 4 4)
(export "main" (func $main))
)
(assert_trap (invoke "main") "misaligned memory access")
(module
(type (;0;) (func))
(func $main (type 0)
i32.const -64
i32.const -63
memory.atomic.notify offset=65536
unreachable)
(memory (;0;) 4 4)
(export "main" (func $main))
)
(assert_trap (invoke "main") "out of bounds memory access")
(module
(type (;0;) (func))
(func $wait32 (type 0)
i32.const -64
i32.const 42
i64.const 0
memory.atomic.wait32 offset=1
unreachable)
(func $wait64 (type 0)
i32.const -64
i64.const 43
i64.const 0
memory.atomic.wait64 offset=3
unreachable)
(memory (;0;) 4 4)
(export "wait32" (func $wait32))
(export "wait64" (func $wait64))
)
(assert_trap (invoke "wait32") "misaligned memory access")
(assert_trap (invoke "wait64") "misaligned memory access")