test compile precise-output
target x86_64
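
;; Tests of `heap_addr` legalization on x86-64: dynamic and static memories,
;; with and without Spectre mitigations and non-zero access offsets.
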
;; Calculate a heap address on a dynamically-allocated memory. Because the
;; Spectre mitigations are on by default (i.e.,
;; `set enable_heap_access_spectre_mitigation=true`), this code not only
;; performs the dynamic bounds check (`add + jnb + cmp`: the offset plus the
;; `0x8000` immediate is computed with an overflow trap, then compared against
;; the loaded heap bound) but also conditionally replaces the computed address
;; with a null pointer (`xor + cmov`)--Cranelift's Spectre mitigation. Counting
;; loads and ignoring intermediate `mov`s, this amounts to a 10-instruction
;; sequence.
;;
;; It also uses quite a few registers; here is a breakdown of what each
;; register generally contains:
;; - %rax holds the passed-in heap offset (argument #1, zero-extended) and ends
;;   up holding the final address
;; - %rdi holds a copy of the heap offset; it is checked for overflow when
;;   added to the `0x8000` immediate and then compared against the heap bound
;; - %rsi holds the VM context pointer (argument #2) and is later zeroed to
;;   provide the null pointer for the `cmov`
;; - %rcx holds the heap bound, loaded from the VM context (`gv2`)
function %f(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 0x8000, 0
    return v2
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %edi, %eax
; movq %rax, %rdi
; addq %rdi, $32768, %rdi
; jnb ; ud2 heap_oob ;
; movq 8(%rsi), %rcx
; addq %rax, 0(%rsi), %rax
; addq %rax, $32768, %rax
; xorq %rsi, %rsi, %rsi
; cmpq %rcx, %rdi
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret

;; The heap address calculation for this statically-allocated memory compares
;; the passed offset (%r9) against the static bound and, rather than emitting a
;; separate branching bounds check, folds that check into the same Spectre
;; mitigation as above (`xor + cmp + cmov`). This results in a 4-instruction
;; sequence (ignoring `mov`s).
function %f(i64 vmctx, i32) -> i64 system_v {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, bound 0x1000, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):
    v10 = heap_addr.i64 heap0, v1, 0, 0
    return v10
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r9d
; movq %r9, %rax
; addq %rax, 0(%rdi), %rax
; xorq %r8, %r8, %r8
; cmpq $4096, %r9
; cmovnbeq %r8, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret

;; When a static memory is the "right" size (a 4 GiB bound with a 2 GiB guard
;; region), the Spectre mitigation is not present: a 32-bit index can never
;; reach past the 4 GiB bound, so Cranelift relies on the guard region's page
;; permissions and emits no bounds checking, simply an `add`--a single
;; instruction.
function %f(i64 vmctx, i32) -> i64 system_v {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32

block0(v0: i64, v1: i32):
    v10 = heap_addr.i64 heap0, v1, 0, 0
    return v10
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
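
;; A dynamic memory accessed with a non-zero `heap_addr` offset (16) and access
;; size (8): the offset and size are folded into a single `offset + size = 24`
;; immediate, which is added to the index with an overflow trap (`add + jnb`),
;; compared against the bound loaded from the VM context, and guarded by the
;; same Spectre `cmov` as above.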
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):
    v2 = heap_addr.i64 heap0, v1, 16, 8
    return v2
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %esi
; movq %rsi, %r11
; addq %r11, $24, %r11
; jnb ; ud2 heap_oob ;
; movq %rdi, %rax
; addq %rax, %rsi, %rax
; addq %rax, $16, %rax
; xorq %rsi, %rsi, %rsi
; cmpq 0(%rdi), %r11
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
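
;; A static memory accessed with the same offset (16) and access size (8): here
;; the offset and size are folded into the comparison immediate instead, so the
;; index is checked against `0x1_0000 - 16 - 8 = 65512` and the explicit bounds
;; check again dedupes into the Spectre `cmov`.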
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):
    v2 = heap_addr.i64 heap0, v1, 16, 8
    return v2
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r10d
; movq %rdi, %rax
; addq %rax, %r10, %rax
; addq %rax, $16, %rax
; xorq %r9, %r9, %r9
; cmpq $65512, %r10
; cmovnbeq %r9, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret