Implement inline stack probes for AArch64 (#5353)
* Turn off probestack by default in Cranelift

  The probestack feature is not implemented for the aarch64 and s390x
  backends and currently the on-by-default status requires the aarch64 and
  s390x implementations to be stubs. Turning off probestack by default
  allows the s390x and aarch64 backends to panic with an error message to
  avoid providing a false sense of security. When the probestack option is
  implemented for all backends, however, it may be reasonable to re-enable.

* aarch64: Improve codegen for AMode fallback

  Currently the final fallback for finalizing an `AMode` will generate both
  a constant-loading instruction as well as an `add` instruction to the
  base register into the same temporary. This commit improves the codegen
  by removing the `add` instruction and folding the final add into the
  finalized `AMode`. This changes the `extendop` used, but both registers
  are 64-bit so they shouldn't be affected by the extending operation.

* aarch64: Implement inline stack probes

  This commit implements inline stack probes for the aarch64 backend in
  Cranelift. The support here is modeled after the x64 support, where
  unrolled probes are used up to a particular threshold after which a loop
  is generated. The instructions here are similar in spirit to x64, except
  that unlike x64 the stack pointer isn't modified during the unrolled
  sequence, to avoid needing to re-adjust it back up at the end of the loop.

* Enable inline probestack for AArch64 and Riscv64

  This commit enables inline probestacks for the AArch64 and Riscv64
  architectures in the same manner that x86_64 has it enabled now. Some
  more testing was additionally added, since on Unix platforms we should
  be guaranteed that Rust's stack overflow message is now printed too.

* Enable probestack for aarch64 in cranelift-fuzzgen

* Address review comments

* Remove implicit stack overflow traps from x64 backend

  This commit removes implicit `StackOverflow` traps inserted by the x64
  backend for stack-based operations. This was historically required when
  stack overflow was detected with page faults, but Wasmtime no longer
  requires that since it's not suitable for wasm modules which call host
  functions. Additionally no other backend implements this form of
  implicit trap-code addition, so this is intended to synchronize the
  behavior of all the backends. This fixes a test added earlier for
  aarch64 to properly abort the process instead of accidentally being
  caught by Wasmtime.

* Fix a style issue
This commit is contained in:
@@ -0,0 +1,71 @@
test compile precise-output
set enable_probestack=true
set probestack_strategy=inline
; This is the default and is equivalent to a page size of 4096
set probestack_size_log2=12
target aarch64

; If the stack size is just one page, we can avoid the stack probe entirely
function %single_page() -> i64 system_v {
    ss0 = explicit_slot 2048

block0:
    v1 = stack_addr.i64 ss0
    return v1
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #2048
; block0:
; mov x0, sp
; add sp, sp, #2048
; ldp fp, lr, [sp], #16
; ret

function %unrolled() -> i64 system_v {
    ss0 = explicit_slot 12288

block0:
    v1 = stack_addr.i64 ss0
    return v1
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movn x16, #4095 ; str wzr, [sp, x16, SXTX]
; movn x16, #8191 ; str wzr, [sp, x16, SXTX]
; movn x16, #12287 ; str wzr, [sp, x16, SXTX]
; sub sp, sp, #12288
; block0:
; mov x0, sp
; add sp, sp, #12288
; ldp fp, lr, [sp], #16
; ret

function %large() -> i64 system_v {
    ss0 = explicit_slot 100000

block0:
    v1 = stack_addr.i64 ss0
    return v1
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz x16, #0
; movz w17, #34464
; movk w17, w17, #1, LSL #16
; stack_probe_loop x16, x17, #4096
; movz w16, #34464
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; movz w16, #34464
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
@@ -177,7 +177,7 @@ block0(v0: i64):

 ; stp fp, lr, [sp, #-16]!
 ; mov fp, sp
-; movz w16, #6784 ; movk w16, w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
+; movz w16, #6784 ; movk w16, w16, #6, LSL #16 ; ldr x16, [x0, x16, SXTX]
 ; add x16, x16, #32
 ; subs xzr, sp, x16, UXTX
 ; b.hs 8 ; udf
@@ -1,5 +1,6 @@
 test compile precise-output
 set unwind_info=false
+set enable_probestack=true
 target riscv64

 function %foo() {
@@ -1,5 +1,6 @@
 test compile precise-output
 set unwind_info=false
+set enable_probestack=true
 target riscv64

 function %stack_addr_small() -> i64 {
@@ -6,9 +6,11 @@ set probestack_strategy=inline
 ; This is the default and is equivalent to a page size of 4096
 set probestack_size_log2=12
 target x86_64
+target aarch64
 ; Test also with 64k pages
 set probestack_size_log2=16
 target x86_64
+target aarch64

 ; Create a huge stack slot (1MB), way larger than PAGE_SIZE and touch the end of it.
 ; This guarantees that we bypass the guard page, cause a page fault the OS isn't expecting
Reference in New Issue
Block a user