wasmtime/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif
Chris Fallin e694fb1312 Spectre mitigation on heap access overflow checks.
This PR adds a conditional move, following each heap bounds check,
through which the address to be accessed flows. This conditional move
ensures that even if the branch is mispredicted (the access is actually
out of bounds, but speculation goes down the in-bounds path), the
address actually accessed is zero (a NULL pointer) rather than the
out-of-bounds address.
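
Schematically, the guarded address computation looks like the CLIF
below. This is an illustrative sketch only, not necessarily the exact
sequence this PR emits (the real lowering may use a flags-based
conditional-move form); v0/v1/v2 are assumed to hold the index, bound,
and heap base:

    v3 = icmp ugt v0, v1    ; out-of-bounds condition
    v4 = iadd v2, v0        ; computed heap address
    v5 = iconst.i64 0       ; NULL
    v6 = select v3, v5, v4  ; address the load/store actually uses

Even when the branch misspeculates down the in-bounds path, the
dependent memory access sees v6 = 0, so no attacker-chosen address is
dereferenced.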

The mitigation is controlled by a flag that is off by default, but can
be set by the embedding. Note that in order to turn it on by default,
we would need to add conditional-move support to the current x86
backend, which does not appear to have it. Once the deprecated backend
is removed in favor of the new backend, IMHO we should turn this flag
on by default.
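
For reference, a filetest opts in through its header, in the same way
the test below opts out; an embedder would set the same shared
Cranelift setting when constructing its flags:

    test legalizer
    set enable_heap_access_spectre_mitigation=true
    target x86_64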

Note that the mitigation is unnecessary when we use the "huge heap"
technique on 64-bit systems, in which we allocate a range of virtual
address space such that no 32-bit offset can reach other data. Hence,
this only affects small-heap configurations.
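
heap0 in the test below is exactly such a configuration: with a 4 GiB
(0x1_0000_0000) bound and a 2 GiB (0x8000_0000) offset guard, every
32-bit index plus a small offset resolves inside the reserved region,
so no bounds check is emitted at all and there is nothing to mitigate:

    heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32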
2020-07-01 08:36:09 -07:00

test legalizer
set enable_heap_access_spectre_mitigation=false
target x86_64
; Test legalization for various forms of heap addresses.
; regex: BB=block\d+
function %heap_addrs(i32, i64, i64 vmctx) {
gv4 = vmctx
gv0 = iadd_imm.i64 gv4, 64
gv1 = iadd_imm.i64 gv4, 72
gv2 = iadd_imm.i64 gv4, 80
gv3 = load.i32 notrap aligned gv4+88
heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
heap3 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i64
heap4 = dynamic gv1, min 0x1_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
heap5 = dynamic gv1, bound gv3, offset_guard 0x1000, index_type i32
heap6 = dynamic gv1, min 0x1_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
heap7 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i64
; check: heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; check: heap1 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i32
; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i64
; check: heap3 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i64
; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
; check: heap5 = dynamic gv1, min 0, bound gv3, offset_guard 4096, index_type i32
; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
; check: heap7 = dynamic gv1, min 0, bound gv2, offset_guard 4096, index_type i64
block0(v0: i32, v1: i64, v3: i64):
; The fast path: 32-bit index, static heap with a sufficient bound, no bounds check needed!
v4 = heap_addr.i64 heap0, v0, 0
; check: v12 = uextend.i64 v0
; check: v13 = iadd_imm v3, 64
; check: v4 = iadd v13, v12
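; Static heap with a small bound: an explicit bounds check and trap are required.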
v5 = heap_addr.i64 heap1, v0, 0
; check: v14 = icmp_imm ugt v0, 0x0001_0000
; check: brz v14, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap heap_oob
; check: $resume_1:
; check: v15 = uextend.i64 v0
; check: v16 = iadd_imm.i64 v3, 64
; check: v5 = iadd v16, v15
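; 64-bit index into a 4 GiB static heap: the bound is materialized as a constant for the compare.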
v6 = heap_addr.i64 heap2, v1, 0
; check: v19 = iconst.i64 0x0001_0000_0000
; check: v17 = icmp.i64 ugt v1, v19
; check: brz v17, $(resume_2=$BB)
; nextln: jump $(trap_2=$BB)
; check: $trap_2:
; nextln: trap heap_oob
; check: $resume_2:
; check: v18 = iadd_imm.i64 v3, 64
; check: v6 = iadd v18, v1
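; 64-bit index with a small static bound: the bound fits in an icmp_imm.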
v7 = heap_addr.i64 heap3, v1, 0
; check: v20 = icmp_imm.i64 ugt v1, 0x0001_0000
; check: brz v20, $(resume_3=$BB)
; nextln: jump $(trap_3=$BB)
; check: $trap_3:
; nextln: trap heap_oob
; check: $resume_3:
; check: v21 = iadd_imm.i64 v3, 64
; check: v7 = iadd v21, v1
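; Dynamic heap: the bound (gv3) is loaded from the vmctx before the check.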
v8 = heap_addr.i64 heap4, v0, 0
; check: v22 = load.i32 notrap aligned v3+88
; check: v23 = iadd_imm v22, 0
; check: v24 = icmp.i32 ugt v0, v23
; check: brz v24, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap heap_oob
; check: $resume_4:
; check: v25 = uextend.i64 v0
; check: v26 = iadd_imm.i64 v3, 72
; check: v8 = iadd v26, v25
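; Same dynamic bound with only a small offset guard: the same check sequence is emitted.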
v9 = heap_addr.i64 heap5, v0, 0
; check: v27 = load.i32 notrap aligned v3+88
; check: v28 = iadd_imm v27, 0
; check: v29 = icmp.i32 ugt v0, v28
; check: brz v29, $(resume_5=$BB)
; nextln: jump $(trap_5=$BB)
; check: $trap_5:
; nextln: trap heap_oob
; check: $resume_5:
; check: v30 = uextend.i64 v0
; check: v31 = iadd_imm.i64 v3, 72
; check: v9 = iadd v31, v30
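; 64-bit index with a dynamic bound computed from gv2 (vmctx+80).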
v10 = heap_addr.i64 heap6, v1, 0
; check: v32 = iadd_imm.i64 v3, 80
; check: v33 = iadd_imm v32, 0
; check: v34 = icmp.i64 ugt v1, v33
; check: brz v34, $(resume_6=$BB)
; nextln: jump $(trap_6=$BB)
; check: $trap_6:
; nextln: trap heap_oob
; check: $resume_6:
; check: v35 = iadd_imm.i64 v3, 72
; check: v10 = iadd v35, v1
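; As heap6, but with a small offset guard.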
v11 = heap_addr.i64 heap7, v1, 0
; check: v36 = iadd_imm.i64 v3, 80
; check: v37 = iadd_imm v36, 0
; check: v38 = icmp.i64 ugt v1, v37
; check: brz v38, $(resume_7=$BB)
; nextln: jump $(trap_7=$BB)
; check: $trap_7:
; nextln: trap heap_oob
; check: $resume_7:
; check: v39 = iadd_imm.i64 v3, 72
; check: v11 = iadd v39, v1
return
}