test compile
set unwind_info=false
target aarch64

; Empty helper used as a call target below.
function %foo() {
block0:
    return
}

; A leaf function performs no stack-limit check, even when a limit is supplied.
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; Likewise when the stack limit is loaded through a chain of global values.
function %stack_limit_gv_leaf_zero(i64 vmctx) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv1+4
    stack_limit = gv2

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; A non-leaf function checks sp against the limit register before the call.
function %stack_limit_call_zero(i64 stack_limit) {
    fn0 = %foo()

block0(v0: i64):
    call fn0()
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; The same check with the limit loaded through vmctx.
function %stack_limit_gv_call_zero(i64 vmctx) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv1+4
    stack_limit = gv2
    fn0 = %foo()

block0(v0: i64):
    call fn0()
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; With an explicit stack slot, the check is against limit + frame size
; (168 bytes rounded up to 176).
function %stack_limit(i64 stack_limit) {
    ss0 = explicit_slot 168

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x16, x0, #176
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #176
; nextln: add sp, sp, #176
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; A frame too large for an add immediate: sp is checked against the raw limit
; first, then against limit + 400000 materialized via movz/movk.
function %huge_stack_limit(i64 stack_limit) {
    ss0 = explicit_slot 400000

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x0, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; Global-value stack limit plus a small frame (20 bytes rounded up to 32).
function %limit_preamble(i64 vmctx) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv1+4
    stack_limit = gv2
    ss0 = explicit_slot 20

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; Global-value stack limit plus a huge frame: both the raw-limit check and
; the limit + frame-size check are emitted.
function %limit_preamble_huge(i64 vmctx) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv1+4
    stack_limit = gv2
    ss0 = explicit_slot 400000

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x16, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

; A global-value offset (400000) too large for a load immediate is itself
; materialized with movz/movk before the limit is loaded.
function %limit_preamble_huge_offset(i64 vmctx) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+400000
    stack_limit = gv1
    ss0 = explicit_slot 20

block0(v0: i64):
    return
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret