; wasmtime/cranelift/filetests/filetests/isa/aarch64/stack.clif
;
; From commit 2e9b0802ab, "aarch64: Rework amode compilation to produce SSA
; code (#5369)" (Trevor Elliott, 2022-12-02): amode compilation in the
; aarch64 backend was reworked to stop reusing registers and to instead
; generate fresh virtual registers for intermediates. This resolved some
; SSA-checker violations in the aarch64 backend and, as a nice side effect,
; removed some unnecessary movs in the generated code.
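;
; Illustrative sketch (not output from this file): an amode for a large
; stack offset could previously redefine its input register in place,
; roughly
;
;     movz x16, #34480
;     movk x16, x16, #1, LSL #16
;     add  x1, x1, x16          ; input x1 clobbered in place -- not SSA
;
; whereas the intermediate now lands in a fresh virtual register:
;
;     add  x2, x1, x16          ; fresh temporary; x1 stays intact
;
; The "mov xN, sp" address temporaries in the expected output below are
; such fresh intermediates.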

test compile precise-output
set unwind_info=false
target aarch64
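
; An 8-byte slot rounds up to a 16-byte frame, which fits in the 12-bit
; immediate of a single sub/add of sp.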
function %stack_addr_small() -> i64 {
    ss0 = explicit_slot 8

block0:
    v0 = stack_addr.i64 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
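
; The 100016-byte frame (100000-byte slot plus ss1 and padding) exceeds
; the 12-bit immediate range, so the size 0x186b0 is materialized in x16
; with a movz/movk pair (34480 | 1 << 16 = 100016) before adjusting sp.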
function %stack_addr_big() -> i64 {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0:
    v0 = stack_addr.i64 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
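
; stack_load materializes the slot address into a fresh temporary (x1
; below) and loads through it.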
function %stack_load_small() -> i64 {
    ss0 = explicit_slot 8

block0:
    v0 = stack_load.i64 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x1, sp
; ldr x0, [x1]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
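
; As above, but with the large frame set up and torn down through x16.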
function %stack_load_big() -> i64 {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0:
    v0 = stack_load.i64 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x1, sp
; ldr x0, [x1]
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
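
; stack_store likewise goes through a fresh address temporary (x2 below).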
function %stack_store_small(i64) {
    ss0 = explicit_slot 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    return
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x2, sp
; str x0, [x2]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
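
; A store to the slot at the base of a large frame.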
function %stack_store_big(i64) {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    return
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x2, sp
; str x0, [x2]
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
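
; Enough live i64 values to exhaust the allocatable registers: the
; allocator spills into the frame above the 1000-byte slot (stores and
; reloads at sp offsets #1000 and up within the 1152-byte frame).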
function %i8_spill_slot(i8) -> i8, i64 {
    ss0 = explicit_slot 1000

block0(v0: i8):
    v1 = iconst.i64 1
    v2 = iconst.i64 2
    v3 = iconst.i64 3
    v4 = iconst.i64 4
    v5 = iconst.i64 5
    v6 = iconst.i64 6
    v7 = iconst.i64 7
    v8 = iconst.i64 8
    v9 = iconst.i64 9
    v10 = iconst.i64 10
    v11 = iconst.i64 11
    v12 = iconst.i64 12
    v13 = iconst.i64 13
    v14 = iconst.i64 14
    v15 = iconst.i64 15
    v16 = iconst.i64 16
    v17 = iconst.i64 17
    v18 = iconst.i64 18
    v19 = iconst.i64 19
    v20 = iconst.i64 20
    v21 = iconst.i64 21
    v22 = iconst.i64 22
    v23 = iconst.i64 23
    v24 = iconst.i64 24
    v25 = iconst.i64 25
    v26 = iconst.i64 26
    v27 = iconst.i64 27
    v28 = iconst.i64 28
    v29 = iconst.i64 29
    v30 = iconst.i64 30
    v31 = iconst.i64 31
    v32 = iconst.i64 32
    v33 = iconst.i64 33
    v34 = iconst.i64 34
    v35 = iconst.i64 35
    v36 = iconst.i64 36
    v37 = iconst.i64 37
    v38 = iconst.i64 38
    v39 = iconst.i64 39
    v40 = iconst.i64 30
    v41 = iconst.i64 31
    v42 = iconst.i64 32
    v43 = iconst.i64 33
    v44 = iconst.i64 34
    v45 = iconst.i64 35
    v46 = iconst.i64 36
    v47 = iconst.i64 37
    v48 = iconst.i64 38
    v49 = iconst.i64 39
    v50 = iconst.i64 30
    v51 = iconst.i64 31
    v52 = iconst.i64 32
    v53 = iconst.i64 33
    v54 = iconst.i64 34
    v55 = iconst.i64 35
    v56 = iconst.i64 36
    v57 = iconst.i64 37
    v58 = iconst.i64 38
    v59 = iconst.i64 39
    v60 = iconst.i64 30
    v61 = iconst.i64 31
    v62 = iconst.i64 32
    v63 = iconst.i64 33
    v64 = iconst.i64 34
    v65 = iconst.i64 35
    v66 = iconst.i64 36
    v67 = iconst.i64 37
    v68 = iconst.i64 38
    v69 = iconst.i64 39
    v70 = iadd.i64 v1, v2
    v71 = iadd.i64 v3, v4
    v72 = iadd.i64 v5, v6
    v73 = iadd.i64 v7, v8
    v74 = iadd.i64 v9, v10
    v75 = iadd.i64 v11, v12
    v76 = iadd.i64 v13, v14
    v77 = iadd.i64 v15, v16
    v78 = iadd.i64 v17, v18
    v79 = iadd.i64 v19, v20
    v80 = iadd.i64 v21, v22
    v81 = iadd.i64 v23, v24
    v82 = iadd.i64 v25, v26
    v83 = iadd.i64 v27, v28
    v84 = iadd.i64 v29, v30
    v85 = iadd.i64 v31, v32
    v86 = iadd.i64 v33, v34
    v87 = iadd.i64 v35, v36
    v88 = iadd.i64 v37, v38
    v89 = iadd.i64 v39, v40
    v90 = iadd.i64 v41, v42
    v91 = iadd.i64 v43, v44
    v92 = iadd.i64 v45, v46
    v93 = iadd.i64 v47, v48
    v94 = iadd.i64 v49, v50
    v95 = iadd.i64 v51, v52
    v96 = iadd.i64 v53, v54
    v97 = iadd.i64 v55, v56
    v98 = iadd.i64 v57, v58
    v99 = iadd.i64 v59, v60
    v100 = iadd.i64 v61, v62
    v101 = iadd.i64 v63, v64
    v102 = iadd.i64 v65, v66
    v103 = iadd.i64 v67, v68
    v104 = iadd.i64 v69, v70
    v105 = iadd.i64 v71, v72
    v106 = iadd.i64 v73, v74
    v107 = iadd.i64 v75, v76
    v108 = iadd.i64 v77, v78
    v109 = iadd.i64 v79, v80
    v110 = iadd.i64 v81, v82
    v111 = iadd.i64 v83, v84
    v112 = iadd.i64 v85, v86
    v113 = iadd.i64 v87, v88
    v114 = iadd.i64 v89, v90
    v115 = iadd.i64 v91, v92
    v116 = iadd.i64 v93, v94
    v117 = iadd.i64 v95, v96
    v118 = iadd.i64 v97, v98
    v119 = iadd.i64 v99, v100
    v120 = iadd.i64 v101, v102
    v121 = iadd.i64 v103, v104
    v122 = iadd.i64 v105, v106
    v123 = iadd.i64 v107, v108
    v124 = iadd.i64 v109, v110
    v125 = iadd.i64 v111, v112
    v126 = iadd.i64 v113, v114
    v127 = iadd.i64 v115, v116
    v128 = iadd.i64 v117, v118
    v129 = iadd.i64 v119, v120
    v130 = iadd.i64 v121, v122
    v131 = iadd.i64 v123, v124
    v132 = iadd.i64 v125, v126
    v133 = iadd.i64 v127, v128
    v134 = iadd.i64 v129, v130
    v135 = iadd.i64 v131, v132
    v136 = iadd.i64 v133, v134
    v137 = iadd.i64 v135, v136
    return v0, v137
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; stp x27, x28, [sp, #-16]!
; stp x25, x26, [sp, #-16]!
; stp x23, x24, [sp, #-16]!
; stp x21, x22, [sp, #-16]!
; stp x19, x20, [sp, #-16]!
; sub sp, sp, #1152
; block0:
; str x0, [sp, #1000]
; movz x6, #2
; add x9, x6, #1
; str x9, [sp, #1136]
; movz x6, #4
; add x10, x6, #3
; str x10, [sp, #1128]
; movz x6, #6
; add x11, x6, #5
; str x11, [sp, #1120]
; movz x6, #8
; add x12, x6, #7
; str x12, [sp, #1112]
; movz x6, #10
; add x13, x6, #9
; str x13, [sp, #1104]
; movz x6, #12
; add x14, x6, #11
; str x14, [sp, #1096]
; movz x6, #14
; add x15, x6, #13
; str x15, [sp, #1088]
; movz x6, #16
; add x1, x6, #15
; str x1, [sp, #1080]
; movz x6, #18
; add x2, x6, #17
; str x2, [sp, #1072]
; movz x6, #20
; add x3, x6, #19
; str x3, [sp, #1064]
; movz x6, #22
; add x4, x6, #21
; str x4, [sp, #1056]
; movz x6, #24
; add x5, x6, #23
; str x5, [sp, #1048]
; movz x6, #26
; add x6, x6, #25
; str x6, [sp, #1040]
; movz x6, #28
; add x7, x6, #27
; str x7, [sp, #1032]
; movz x6, #30
; add x24, x6, #29
; str x24, [sp, #1024]
; movz x6, #32
; add x25, x6, #31
; str x25, [sp, #1016]
; movz x6, #34
; add x26, x6, #33
; movz x6, #36
; add x27, x6, #35
; str x27, [sp, #1008]
; movz x6, #38
; add x27, x6, #37
; movz x6, #30
; add x28, x6, #39
; movz x6, #32
; add x21, x6, #31
; movz x6, #34
; add x19, x6, #33
; movz x6, #36
; add x20, x6, #35
; movz x6, #38
; add x22, x6, #37
; movz x6, #30
; add x23, x6, #39
; movz x6, #32
; add x0, x6, #31
; movz x6, #34
; add x8, x6, #33
; movz x6, #36
; add x9, x6, #35
; movz x6, #38
; add x10, x6, #37
; movz x6, #30
; add x11, x6, #39
; movz x6, #32
; add x12, x6, #31
; movz x6, #34
; add x13, x6, #33
; movz x6, #36
; add x14, x6, #35
; movz x6, #38
; add x15, x6, #37
; ldr x1, [sp, #1136]
; add x1, x1, #39
; ldr x3, [sp, #1120]
; ldr x2, [sp, #1128]
; add x2, x2, x3
; ldr x3, [sp, #1104]
; ldr x6, [sp, #1112]
; add x3, x6, x3
; ldr x4, [sp, #1088]
; ldr x5, [sp, #1096]
; add x4, x5, x4
; ldr x5, [sp, #1072]
; ldr x6, [sp, #1080]
; add x5, x6, x5
; ldr x7, [sp, #1056]
; ldr x6, [sp, #1064]
; add x6, x6, x7
; ldr x7, [sp, #1040]
; ldr x24, [sp, #1048]
; add x7, x24, x7
; ldr x24, [sp, #1024]
; ldr x25, [sp, #1032]
; add x24, x25, x24
; ldr x25, [sp, #1016]
; add x25, x25, x26
; ldr x26, [sp, #1008]
; add x26, x26, x27
; add x27, x28, x21
; add x28, x19, x20
; add x23, x22, x23
; add x8, x0, x8
; add x9, x9, x10
; add x10, x11, x12
; add x11, x13, x14
; add x12, x15, x1
; add x13, x2, x3
; add x14, x4, x5
; add x7, x6, x7
; add x15, x24, x25
; add x0, x26, x27
; add x1, x28, x23
; add x8, x8, x9
; add x9, x10, x11
; add x10, x12, x13
; add x7, x14, x7
; add x11, x15, x0
; add x8, x1, x8
; add x9, x9, x10
; add x7, x7, x11
; add x8, x8, x9
; add x1, x7, x8
; ldr x0, [sp, #1000]
; add sp, sp, #1152
; ldp x19, x20, [sp], #16
; ldp x21, x22, [sp], #16
; ldp x23, x24, [sp], #16
; ldp x25, x26, [sp], #16
; ldp x27, x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
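
; An i128 value occupies the register pair x0/x1 and is stored with a
; single stp.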
function %i128_stack_store(i128) {
    ss0 = explicit_slot 16

block0(v0: i128):
    stack_store.i128 v0, ss0
    return
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x3, sp
; stp x0, x1, [x3]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
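
; The slot base and the +16 instruction offset fold into a single add of
; sp (sp + 32 below).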
function %i128_stack_store_inst_offset(i128) {
    ss0 = explicit_slot 16
    ss1 = explicit_slot 16

block0(v0: i128):
    stack_store.i128 v0, ss1+16
    return
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x3, sp, #32
; stp x0, x1, [x3]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
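
; An i128 store at the base of a large frame; the frame size again goes
; through x16.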
function %i128_stack_store_big(i128) {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0(v0: i128):
    stack_store.i128 v0, ss0
    return
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x3, sp
; stp x0, x1, [x3]
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
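
; An i128 load via ldp through a fresh address temporary.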
function %i128_stack_load() -> i128 {
    ss0 = explicit_slot 16

block0:
    v0 = stack_load.i128 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x2, sp
; ldp x0, x1, [x2]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
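
; As with the i128 store above, slot base plus offset becomes a single
; add of sp.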
function %i128_stack_load_inst_offset() -> i128 {
    ss0 = explicit_slot 16
    ss1 = explicit_slot 16

block0:
    v0 = stack_load.i128 ss1+16
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x2, sp, #32
; ldp x0, x1, [x2]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
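
; An i128 load at the base of a large frame.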
function %i128_stack_load_big() -> i128 {
    ss0 = explicit_slot 100000
    ss1 = explicit_slot 8

block0:
    v0 = stack_load.i128 ss0
    return v0
}

; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x2, sp
; ldp x0, x1, [x2]
; movz w16, #34480
; movk w16, w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret