This PR adds a conditional move following each heap bounds check, through which the address to be accessed flows. The conditional move ensures that even if the branch is mispredicted (the access is actually out of bounds, but speculation goes down the in-bounds path), the address actually accessed is zero (a NULL pointer) rather than the out-of-bounds address.

The mitigation is controlled by a flag that is off by default but can be enabled by the embedding. Note that in order to turn it on by default, we would need to add conditional-move support to the current x86 backend; this does not appear to be present. Once the deprecated backend is removed in favor of the new backend, IMHO we should turn this flag on by default.

Note also that the mitigation is unnecessary when we use the "huge heap" technique on 64-bit systems, in which we allocate a range of virtual address space large enough that no 32-bit offset can reach other data. Hence, this only affects small-heap configurations.
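To make the shape of the change concrete, here is a hand-written CLIF sketch of the mitigated pattern. This is not the literal legalizer output: the value and block numbers are invented, a plain `select` stands in for whatever conditional-move instruction the legalizer actually emits, and the 0x1000_0000 bound is just an example.

    v2 = uextend.i64 v0                 ; zero-extend the 32-bit index
    v3 = icmp_imm uge v2, 0x1000_0000   ; out-of-bounds flag from the bounds check
    brz v3, block1                      ; in bounds: continue to the access
    jump block2                         ; out of bounds: block2 does `trap heap_oob`

block1:
    v4 = iadd_imm v1, 64                ; heap base (v1 = vmctx)
    v5 = iadd v4, v2                    ; candidate address
    v6 = iconst.i64 0                   ; NULL
    v7 = select v3, v6, v5              ; the conditional move: if the branch was
                                        ; mispredicted, v7 is NULL, so speculation
                                        ; dereferences NULL rather than OOB memory
    v8 = load.f32 v7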
; Test the legalization of memory objects.
test legalizer
set enable_heap_access_spectre_mitigation=false
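; The mitigation is disabled for this test, so the checks below expect the
; plain bounds-check code with no conditional move on the computed address.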
target x86_64

; regex: V=v\d+
; regex: BB=block\d+

function %vmctx(i64 vmctx) -> i64 {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, -16

block1(v1: i64):
    v2 = global_value.i64 gv1
    ; check: v2 = iadd_imm v1, -16
    return v2
    ; check: return v2
}

function %load(i64 vmctx) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0-16
    gv2 = iadd_imm.i64 gv1, 32

block1(v1: i64):
    v2 = global_value.i64 gv2
    ; check: $(p1=$V) = load.i64 notrap aligned v1-16
    ; check: v2 = iadd_imm $p1, 32
    return v2
    ; check: return v2
}

function %symbol() -> i64 {
    gv0 = symbol %something
    gv1 = symbol u123:456

block1:
    v0 = global_value.i64 gv0
    ; check: v0 = symbol_value.i64 gv0
    v1 = global_value.i64 gv1
    ; check: v1 = symbol_value.i64 gv1
    v2 = bxor v0, v1
    return v2
}

; SpiderMonkey VM-style static 4+2 GB heap.
; This eliminates bounds checks completely for offsets < 2 GB.
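; (With a 4 GB bound and a 2 GB offset guard, a 32-bit index plus any
; constant offset below 2 GB cannot escape the guarded region, so the
; explicit check can be dropped.)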
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v999: i64):
    ; check: block0(
    v1 = heap_addr.i64 heap0, v0, 1
    ; The bounds check should be eliminated.
    ; The checks here assume that no peephole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 v0
    ; nextln: $(hbase=$V) = iadd_imm v999, 64
    ; nextln: v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+16
    ; nextln: v2 = load.f32 v1+16
    v3 = load.f32 v1+20
    ; nextln: v3 = load.f32 v1+20
    v4 = fadd v2, v3
    return v4
}

function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000

block0(v0: i32, v999: i64):
    ; Everything after the obviously OOB access should be eliminated, leaving
    ; the `trap heap_oob` instruction as the terminator of the block and moving
    ; the remainder of the instructions into an unreachable block.
    ; check: block0(
    ; nextln: trap heap_oob
    ; check: block1:
    ; nextln: v1 = iconst.i64 0
    ; nextln: v2 = load.f32 v1+16
    ; nextln: return v2
    ; nextln: }
    v1 = heap_addr.i64 heap0, v0, 0x1000_0001
    v2 = load.f32 v1+16
    return v2
}

; SpiderMonkey VM-style static 4+2 GB heap.
; Offsets >= 2 GB do require a bounds check.
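; (Here the 0x8000_0000 offset already equals the 2 GB offset guard, so the
; access can reach past the guarded region and an explicit check is emitted.)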
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v999: i64):
    ; check: block0(
    v1 = heap_addr.i64 heap0, v0, 0x8000_0000
    ; Bounds-check code:
    ; check: $(oob=$V) = icmp
    ; nextln: brz $oob, $(ok=$BB)
    ; nextln: jump $(trap_oob=$BB)
    ; check: $trap_oob:
    ; nextln: trap heap_oob
    ; check: $ok:
    ; The checks here assume that no peephole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 v0
    ; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
    ; nextln: v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+0x7fff_ffff
    ; nextln: v2 = load.f32 v1+0x7fff_ffff
    return v2
}