refactor: move all 'filetests/vcode' tests to 'filetests/isa'

Andrew Brown
2020-09-28 15:16:23 -07:00
parent 452d854855
commit b43f4a464a
34 changed files with 0 additions and 0 deletions

@@ -0,0 +1,183 @@
test compile
target aarch64
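
; Tests for the stack-limit checks emitted in the aarch64 prologue. %foo is a
; trivial callee used by the call tests below; it gets no checks of its own.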
function %foo() {
block0:
return
}
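
; A leaf function that allocates no stack space needs no limit check: only
; the standard frame setup and teardown are emitted.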
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
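
; The same holds when the limit comes from a global-value chain; even the
; loads of the limit are elided.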
function %stack_limit_gv_leaf_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
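
; A function that makes a call must check sp against the limit (here passed
; in x0) before the call; b.hs skips the udf trap when sp is at or above it.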
function %stack_limit_call_zero(i64 stack_limit) {
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
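
; As above, but the limit is first loaded into x16 through the two-level
; global-value chain rooted at vmctx.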
function %stack_limit_gv_call_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
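
; With a 168-byte slot (rounded up to 176 for 16-byte alignment), the check
; compares sp against limit + frame size before allocating the frame.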
function %stack_limit(i64 stack_limit) {
ss0 = explicit_slot 168
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x16, x0, #176
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #176
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
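
; For a huge frame the check is split: sp is first compared against the raw
; limit (likely guarding against wraparound when the frame size is added),
; then against limit + 400000, with the constant built via movz/movk.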
function %huge_stack_limit(i64 stack_limit) {
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x0, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
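
; GV-based limit with a small frame: load the limit, add the 32-byte frame
; (20 rounded up), compare, then allocate.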
function %limit_preamble(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
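
; GV-based limit with a huge frame: combines the two-load limit
; materialization with the split raw/offset checks used above.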
function %limit_preamble_huge(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x16, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
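
; A 400000-byte load offset does not fit the load's addressing mode, so the
; offset is built with movz/movk and added to the base before the load.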
function %limit_preamble_huge_offset(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+400000
stack_limit = gv1
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret