Rework bounds checking for atomic operations (#5239)

Previously, we would emit a `heap_addr` to translate the given Wasm memory address
into a native memory address and pass that native address into the libcall
implementing the atomic operation. The libcall would then treat the address as a
Wasm memory address and pass it to `validate_atomic_addr` to be bounds-checked a
second time. This was a bit nonsensical, as we were validating a native memory
address as if it were a Wasm memory address.

Now we no longer emit a `heap_addr` to translate the Wasm memory address into a
native memory address. Instead, we pass the Wasm memory address to the libcall,
and the libcall is responsible for doing the bounds check (by calling
`validate_atomic_addr` with the correct kind of memory address).
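To illustrate the shape of the new check, here is a minimal, self-contained sketch; the names and signatures below are illustrative and are not Wasmtime's actual API. The point is that the libcall bounds-checks the guest (Wasm) address itself and only then turns it into a native pointer:

```rust
// Illustrative sketch only; Wasmtime's real `validate_atomic_addr` has a
// different signature and lives in the runtime's libcall layer.
#[derive(Debug)]
enum Trap {
    MemoryOutOfBounds,
}

/// Bounds-check a *Wasm* address against the memory's current length, and
/// only translate it to a native pointer once the check has passed.
fn validate_atomic_addr_sketch(
    memory_base: *mut u8, // base of the linear memory
    memory_len: usize,    // current byte length of the linear memory
    wasm_addr: u64,       // guest address, straight from the atomic instruction
    access_size: u64,     // size in bytes of the atomic access
) -> Result<*mut u8, Trap> {
    // Check `wasm_addr + access_size <= memory_len` without overflowing.
    let end = wasm_addr
        .checked_add(access_size)
        .ok_or(Trap::MemoryOutOfBounds)?;
    if end > memory_len as u64 {
        return Err(Trap::MemoryOutOfBounds);
    }
    // The address only becomes a native pointer after the check succeeds.
    Ok(unsafe { memory_base.add(wasm_addr as usize) })
}
```
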
Author: Nick Fitzgerald
Date: 2022-11-09 16:19:43 -08:00
Committed by: GitHub
Parent: 86679489ef
Commit: 47fa1ad6a8
4 changed files with 74 additions and 52 deletions

@@ -1981,6 +1981,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
         expected: ir::Value,
         timeout: ir::Value,
     ) -> WasmResult<ir::Value> {
+        let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index);
         let implied_ty = pos.func.dfg.value_type(expected);
         let (func_sig, memory_index, func_idx) =
             self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty);
@@ -2006,6 +2007,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
         addr: ir::Value,
         count: ir::Value,
     ) -> WasmResult<ir::Value> {
+        let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index);
         let func_sig = self
             .builtin_function_signatures
             .memory_atomic_notify(&mut pos.func);
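
Both call sites now pass the raw Wasm address to the libcall. Since the libcall takes a 64-bit address while a 32-bit memory's address operand is an `i32`, the address is widened first via `cast_memory_index_to_i64`. The sketch below shows what such a cast helper might look like; it is an assumption for illustration, as the real helper lives on Wasmtime's `FuncEnvironment` and may differ in name and shape:

```rust
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::{self, InstBuilder};

// Illustrative sketch only: a 32-bit memory's address operand is an `i32`,
// and the libcall expects a 64-bit address, so zero-extend it (Wasm addresses
// are unsigned). A memory64 address is already an `i64` and passes through.
fn cast_wasm_addr_to_i64(
    pos: &mut FuncCursor,
    addr: ir::Value,
    memory_is_64bit: bool,
) -> ir::Value {
    if memory_is_64bit {
        addr
    } else {
        pos.ins().uextend(ir::types::I64, addr)
    }
}
```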