Cranelift: Make heap_addr return calculated base + index + offset (#5231)
* Cranelift: Make `heap_addr` return the calculated `base + index + offset`,
rather than just `base + index`.
(Note: I've chosen to use the nomenclature "index" for the dynamic operand and
"offset" for the static immediate.)
This moves the addition of the `offset` into `heap_addr`, instead of leaving it
to the subsequent memory operation, so that we can Spectre-guard the full
address and not allow speculative execution to read the first 4GiB of memory.
Before this commit, we were effectively doing

    load(spectre_guard(base + index) + offset)

Now we are effectively doing

    load(spectre_guard(base + index + offset))
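For example (an illustrative CLIF sketch; the function below is not part of this
change, and the trailing `load` stands in for whatever memory operation consumes
the address), a load at dynamic index `v1` with a static offset of 16 and an
8-byte access now receives its complete, Spectre-guarded address directly from
`heap_addr`:

    function %guarded_load_with_offset(i64 vmctx, i32) -> i64 {
        gv0 = vmctx
        gv1 = load.i64 notrap aligned gv0
        heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

    block0(v0: i64, v1: i32):
        ; The bounds check covers index + offset + access_size (v1 + 16 + 8),
        ; and the Spectre guard applies to the full base + v1 + 16 address.
        v2 = heap_addr.i64 heap0, v1, 16, 8
        ; The memory access itself adds no further offset.
        v3 = load.i64 v2
        return v3
    }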
Finally, this also corrects `heap_addr`'s documented semantics to say that it
returns an address that will trap on access if `index + offset + access_size` is
out of bounds for the given heap, rather than saying that the `heap_addr` itself
will trap. This matches the implemented behavior for static memories; once
https://github.com/bytecodealliance/wasmtime/pull/5190 (which is blocked on this
commit) lands, it will also match the implemented behavior for dynamic memories.
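Concretely, the folded `offset + size` sum is visible in the bounds checks of the
new tests below (numbers taken from the diff):

    dynamic heap: index + (offset + size) = index + 24 must be <= bound             (movz x9, #24)
    static heap:  index must be <= bound - (offset + size) = 0x1_0000 - 24 = 65512  (movz x10, #65512)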
* Update heap_addr docs
* Factor out `offset + size` to a helper
@@ -9,7 +9,7 @@ function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
     heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
-    v2 = heap_addr.i64 heap0, v1, 0
+    v2 = heap_addr.i64 heap0, v1, 0, 0
     return v2
 }
 
@@ -34,7 +34,7 @@ function %static_heap_check(i64 vmctx, i32) -> i64 {
     heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
-    v2 = heap_addr.i64 heap0, v1, 0
+    v2 = heap_addr.i64 heap0, v1, 0, 0
     return v2
 }
 
@@ -52,3 +52,59 @@ block0(v0: i64, v1: i32):
 ; block2:
 ;   udf #0xc11f
 
+
+function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0
+    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
+
+block0(v0: i64, v1: i32):
+    v2 = heap_addr.i64 heap0, v1, 16, 8
+    return v2
+}
+
+; block0:
+;   mov w11, w1
+;   ldr x10, [x0]
+;   movz x9, #24
+;   adds x11, x11, x9
+;   b.lo 8 ; udf
+;   subs xzr, x11, x10
+;   b.ls label1 ; b label2
+; block1:
+;   add x13, x0, x1, UXTW
+;   add x13, x13, #16
+;   movz x12, #0
+;   subs xzr, x11, x10
+;   csel x0, x12, x13, hi
+;   csdb
+;   ret
+; block2:
+;   udf #0xc11f
+
+function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
+    gv0 = vmctx
+    heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
+
+block0(v0: i64, v1: i32):
+    v2 = heap_addr.i64 heap0, v1, 16, 8
+    return v2
+}
+
+; block0:
+;   mov w9, w1
+;   movz x10, #65512
+;   subs xzr, x9, x10
+;   b.ls label1 ; b label2
+; block1:
+;   add x11, x0, x1, UXTW
+;   add x11, x11, #16
+;   movz x10, #65512
+;   movz x12, #0
+;   subs xzr, x9, x10
+;   csel x0, x12, x11, hi
+;   csdb
+;   ret
+; block2:
+;   udf #0xc11f
+