This PR changes the aarch64 ABI implementation to use positive offsets from SP, rather than negative offsets from FP, to refer to spill slots and stack-local storage. This allows for better addressing-mode options and hence slightly better code: e.g., the unsigned scaled 12-bit offset mode can reach anywhere in a 32KB frame without extra address-construction instructions, whereas negative offsets are limited to a signed 9-bit unscaled mode (-256 bytes).

To enable this, the PR introduces a notion of "nominal SP offsets" as a virtual addressing mode, lowered during the emission pass. The offsets are relative to "SP after adjusting downward to allocate stack/spill slots", but before pushing clobbers. This allows the addressing-mode expressions to be generated before register allocation (or during it, for spill/reload sequences).

To convert these offsets into *true* offsets from SP, we need to track how much further SP is moved downward and compensate for this. We do so with "virtual SP offset adjustment" pseudo-instructions: these are seen by the emission pass and result in no instruction (zero bytes of output), but update state that is now threaded through each instruction emission in turn. In this way, we can, e.g., push stack args for a call and adjust the virtual SP offset, allowing reloads from nominal-SP-relative spill slots while we do the argument setup with "real SP offsets" at the same time.
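
To make the mechanism concrete, here is a minimal sketch of the idea in Rust. All names here (`EmitState`, `MemArg::NominalSPOffset`, and so on) are invented for illustration and are not the PR's actual types:

```rust
// Illustrative sketch only: names are hypothetical and do not match the
// actual Cranelift types.

/// State threaded through the emission pass.
pub struct EmitState {
    /// How many bytes SP currently sits below its "nominal" position
    /// (the level just after stack/spill slots are allocated).
    virtual_sp_offset: i64,
}

/// A memory-reference addressing mode.
pub enum MemArg {
    /// Offset relative to the nominal SP; a virtual mode resolved at
    /// emission time.
    NominalSPOffset(i64),
    /// Offset relative to the real, current SP.
    SPOffset(i64),
}

impl MemArg {
    /// Resolve to a real SP-relative byte offset at emission time.
    pub fn real_sp_offset(&self, state: &EmitState) -> i64 {
        match *self {
            // Compensate for any extra downward SP movement (e.g. stack
            // args pushed for a call in progress): the nominal SP sits
            // `virtual_sp_offset` bytes above the real SP.
            MemArg::NominalSPOffset(off) => off + state.virtual_sp_offset,
            MemArg::SPOffset(off) => off,
        }
    }
}

/// The "virtual SP offset adjustment" pseudo-instruction: emits zero
/// bytes of machine code and only updates the threaded state.
pub fn emit_virtual_sp_adjustment(bytes_down: i64, state: &mut EmitState) {
    state.virtual_sp_offset += bytes_down;
}
```

Under this sketch, pushing 32 bytes of stack args for a call would be followed by a virtual adjustment of +32, so that a reload from a nominal-SP-relative spill slot during the argument setup still resolves to the correct real SP offset.
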
test vcode
target aarch64
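
; %stack_addr_small: the 8-byte slot rounds up to a 16-byte frame, which is
; allocated with a single immediate sub. ss0 lives at nominal-SP offset 0,
; so its address is just a mov from SP.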
function %stack_addr_small() -> i64 {
    ss0 = explicit_slot 8

block0:
    v0 = stack_addr.i64 ss0
    return v0
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
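
; %stack_addr_big: the 100016-byte frame (100000 + 8, rounded up to 16-byte
; alignment) is too large for an immediate sub, so the size is loaded from
; an inline constant (ldr; branch over the data) into x16 and subtracted
; with an extended-register sub.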
function %stack_addr_big() -> i64 {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0:
    v0 = stack_addr.i64 ss0
    return v0
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x16, 8 ; b 12 ; data 100016
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; FIXME: don't use stack_addr legalization for stack_load and stack_store
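
; %stack_load_small: per the FIXME above, the load is currently legalized
; via stack_addr, so the address is materialized with a mov and then
; dereferenced with ldur instead of using a single SP-relative load.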
function %stack_load_small() -> i64 {
    ss0 = explicit_slot 8

block0:
    v0 = stack_load.i64 ss0
    return v0
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: ldur x0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
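
; %stack_load_big: same inline-constant frame allocation as %stack_addr_big,
; followed by the mov + ldur pair from the stack_addr legalization.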
function %stack_load_big() -> i64 {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0:
    v0 = stack_load.i64 ss0
    return v0
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x16, 8 ; b 12 ; data 100016
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: ldur x0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
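
; %stack_store_small: as with the loads, the slot address is materialized
; (here into x1) and the store goes through stur.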
function %stack_store_small(i64) {
    ss0 = explicit_slot 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x1, sp
; nextln: stur x0, [x1]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
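
; %stack_store_big: large-frame allocation via x16, then the same
; materialize-address-and-stur sequence.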
function %stack_store_big(i64) {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x16, 8 ; b 12 ; data 100016
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x1, sp
; nextln: stur x0, [x1]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret