wasmtime/cranelift/filetests/filetests/runtests/load-op-store.clif
Latest commit fc62d4ad65 (Nick Fitzgerald): Cranelift: Make heap_addr return calculated base + index + offset (#5231)
* Cranelift: Make `heap_addr` return calculated `base + index + offset`

Rather than returning just `base + index`.

(Note: I've chosen to use the nomenclature "index" for the dynamic operand and
"offset" for the static immediate.)

This moves the addition of the `offset` into `heap_addr`, instead of leaving it
to the subsequent memory operation, so that we can Spectre-guard the full
address and not allow speculative execution to read the first 4 GiB of memory.

Before this commit, we were effectively doing

    load(spectre_guard(base + index) + offset)

Now we are effectively doing

    load(spectre_guard(base + index + offset))
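
Concretely, at the CLIF level the static offset now travels on `heap_addr`
itself instead of on the memory operation's offset field. A rough before/after
sketch (the old-form syntax is approximate; the new form matches the tests in
this file):

    ; before: heap_addr produced base + index, and the load added the +16 offset
    v3 = heap_addr.i64 heap0, v1, 8
    v4 = load.i64 v3+16

    ; after: heap_addr folds the +16 offset into the Spectre-guarded address
    v3 = heap_addr.i64 heap0, v1, 16, 8
    v4 = load.i64 v3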

Finally, this also corrects `heap_addr`'s documented semantics to say that it
returns an address that will trap on access if `index + offset + access_size` is
out of bounds for the given heap, rather than saying that `heap_addr` itself
will trap. This matches the implemented behavior for static memories, and after
https://github.com/bytecodealliance/wasmtime/pull/5190 lands (which is blocked
on this commit) it will also match the implemented behavior for dynamic memories.
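
As an illustrative sketch of those semantics (not one of the tests in this
file; the function name is invented), with a static heap like the ones used
below:

    function %oob_faults_on_access(i64 vmctx, i64) -> i64 {
        gv0 = vmctx
        gv1 = load.i64 notrap aligned gv0+0
        heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

    block0(v0: i64, v1: i64):
        ; heap_addr itself does not trap; it only computes (and Spectre-guards)
        ; the address base + v1 + 0.
        v2 = heap_addr.i64 heap0, v1, 0, 8
        ; The access traps if v1 + 0 + 8 is out of bounds for heap0.
        v3 = load.i64 v2
        return v3
    }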

* Update heap_addr docs

* Factor out `offset + size` to a helper
2022-11-09 19:53:51 +00:00


test run
target x86_64
target s390x
target aarch64
target riscv64
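
; The `heap:` annotations below describe the harness-provided heap for each
; function: its size and where the heap pointer and bound are stored relative
; to the `vmctx` argument. Each `run:` line invokes the preceding function
; (the harness supplies `vmctx`) and checks the returned value.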
function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 0, 8
    v4 = iconst.i64 42
    store.i64 v4, v3
    v5 = load.i64 v3
    v6 = iadd.i64 v5, v2
    store.i64 v6, v3
    v7 = load.i64 v3
    return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i64(0, 1) == 43
; run: %load_op_store_iadd_i64(0, -1) == 41

function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 0, 4
    v4 = iconst.i32 42
    store.i32 v4, v3
    v5 = load.i32 v3
    v6 = iadd.i32 v5, v2
    store.i32 v6, v3
    v7 = load.i32 v3
    return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i32(0, 1) == 43
; run: %load_op_store_iadd_i32(0, -1) == 41

function %load_op_store_iadd_i8(i64 vmctx, i64, i8) -> i8 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i8):
    v3 = heap_addr.i64 heap0, v1, 0, 4
    v4 = iconst.i8 42
    store.i8 v4, v3
    v5 = load.i8 v3
    v6 = iadd.i8 v5, v2
    store.i8 v6, v3
    v7 = load.i8 v3
    return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i8(0, 1) == 43
; run: %load_op_store_iadd_i8(0, -1) == 41

function %load_op_store_iadd_isub_iand_ior_ixor_i64(i64 vmctx, i64, i64) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 0, 8
    store.i64 v2, v3            ; mem = v2 (0x1234 in the run below)
    v4 = load.i64 v3
    v5 = iconst.i64 1
    v6 = iadd.i64 v5, v4        ; 0x1234 + 1 = 0x1235
    store.i64 v6, v3
    v7 = load.i64 v3
    v8 = iconst.i64 2
    v9 = load.i64 v3
    v10 = isub.i64 v9, v8       ; 0x1235 - 2 = 0x1233
    store.i64 v10, v3
    v11 = load.i64 v3
    v12 = iconst.i64 0xf
    v13 = band.i64 v12, v11     ; 0x1233 & 0xf = 0x3
    store.i64 v13, v3
    v14 = iconst.i64 0x10
    v15 = load.i64 v3
    v16 = bor.i64 v15, v14      ; 0x3 | 0x10 = 0x13
    store.i64 v16, v3
    v17 = load.i64 v3
    v18 = iconst.i64 0xff
    v19 = bxor.i64 v17, v18     ; 0x13 ^ 0xff = 0xec = 236
    store.i64 v19, v3
    v20 = load.i64 v3
    return v20
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_isub_iand_ior_ixor_i64(0, 0x1234) == 236