* Cranelift: remove non-egraphs optimization pipeline and `use_egraphs` option.

  This PR removes the LICM, GVN, and preopt passes, and associated support pieces, from `cranelift-codegen`. Not to worry, we still have optimizations: the egraph framework subsumes all of these, and has been on by default since #5181.

  A few decision points:

  - Filetests for the legacy LICM, GVN, and simple_preopt passes were removed too. As we built optimizations in the egraph framework we wrote new tests for the equivalent functionality, and many of the old tests exercised specific behaviors of the old implementations that may no longer be relevant. However, if folks prefer, I could take a different approach here and try to port over all of the tests.
  - The corresponding filetest modes (commands) were deleted too. The `test alias_analysis` mode remains, but no longer invokes a separate GVN first (since there is no separate GVN that will not also do alias analysis), so the tests were tweaked slightly to work with that. The egraph test suite also covers alias analysis.
  - The `divconst_magic_numbers` module is removed since it is unused without `simple_preopt`, though this is the one remaining optimization we still need to build in the egraph framework, pending #5908. The magic numbers will live forever in git history, so removing this in the meantime is not a major issue IMHO.
  - The `use_egraphs` setting itself was removed at both the Cranelift and Wasmtime levels. It has been marked deprecated for a few releases now (Wasmtime 6.0, 7.0, the upcoming 8.0, and corresponding Cranelift versions), so I think this is probably OK. As an alternative, if anyone feels strongly, we could leave the setting and make it a no-op.

* Update test outputs for remaining test differences.
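For context on what replaces the deleted modes in day-to-day testing: egraph-driven optimizations are exercised via the `test optimize` filetest mode. Below is a minimal sketch of such a test, under the assumption that `test optimize` with `set opt_level=speed` and filecheck-style `; check:` lines is the current convention; the sample function and the expected line are illustrative rather than copied from the repository.

```
test optimize
set opt_level=speed
target x86_64

function %add_zero(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 0
    v2 = iadd v0, v1
    return v2
}

; The egraph rules should rewrite `v0 + 0` to `v0`, so the optimized
; output is expected to return the argument directly (illustrative check).
; check: return v0
```

The updated riscv64 stack-access expectations (`test compile precise-output`) follow.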
test compile precise-output
set unwind_info=false
set enable_probestack=true
target riscv64

function %stack_addr_small() -> i64 {
ss0 = explicit_slot 8

block0:
v0 = stack_addr.i64 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr a0,nsp+0
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x10
; block1: ; offset 0x14
; mv a0, sp
; addi sp, sp, 0x10
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %stack_addr_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8

block0:
v0 = stack_addr.i64 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr a0,nsp+0
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; lui a0, 0x18
; addi a0, a0, 0x6b0
; auipc t5, 0
; ld t5, 0xc(t5)
; j 0xc
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %Probestack 0
; .byte 0x00, 0x00, 0x00, 0x00
; jalr t5
; lui t6, 0xfffe8
; addi t6, t6, -0x6b0
; add sp, t6, sp
; block1: ; offset 0x3c
; mv a0, sp
; lui t6, 0x18
; addi t6, t6, 0x6b0
; add sp, t6, sp
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %stack_load_small() -> i64 {
ss0 = explicit_slot 8

block0:
v0 = stack_load.i64 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr t1,nsp+0
; ld a0,0(t1)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x10
; block1: ; offset 0x14
; mv t1, sp
; ld a0, 0(t1)
; addi sp, sp, 0x10
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %stack_load_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8

block0:
v0 = stack_load.i64 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr t1,nsp+0
; ld a0,0(t1)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; lui a0, 0x18
; addi a0, a0, 0x6b0
; auipc t5, 0
; ld t5, 0xc(t5)
; j 0xc
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %Probestack 0
; .byte 0x00, 0x00, 0x00, 0x00
; jalr t5
; lui t6, 0xfffe8
; addi t6, t6, -0x6b0
; add sp, t6, sp
; block1: ; offset 0x3c
; mv t1, sp
; ld a0, 0(t1)
; lui t6, 0x18
; addi t6, t6, 0x6b0
; add sp, t6, sp
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %stack_store_small(i64) {
ss0 = explicit_slot 8

block0(v0: i64):
stack_store.i64 v0, ss0
return
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr t2,nsp+0
; sd a0,0(t2)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x10
; block1: ; offset 0x14
; mv t2, sp
; sd a0, 0(t2)
; addi sp, sp, 0x10
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %stack_store_big(i64) {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8

block0(v0: i64):
stack_store.i64 v0, ss0
return
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr t2,nsp+0
; sd a0,0(t2)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; lui a0, 0x18
; addi a0, a0, 0x6b0
; auipc t5, 0
; ld t5, 0xc(t5)
; j 0xc
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %Probestack 0
; .byte 0x00, 0x00, 0x00, 0x00
; jalr t5
; lui t6, 0xfffe8
; addi t6, t6, -0x6b0
; add sp, t6, sp
; block1: ; offset 0x3c
; mv t2, sp
; sd a0, 0(t2)
; lui t6, 0x18
; addi t6, t6, 0x6b0
; add sp, t6, sp
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i8_spill_slot(i8) -> i8, i64 {
ss0 = explicit_slot 1000

block0(v0: i8):
v1 = iconst.i64 1
v2 = iconst.i64 2
v3 = iconst.i64 3
v4 = iconst.i64 4
v5 = iconst.i64 5
v6 = iconst.i64 6
v7 = iconst.i64 7
v8 = iconst.i64 8
v9 = iconst.i64 9
v10 = iconst.i64 10
v11 = iconst.i64 11
v12 = iconst.i64 12
v13 = iconst.i64 13
v14 = iconst.i64 14
v15 = iconst.i64 15
v16 = iconst.i64 16
v17 = iconst.i64 17
v18 = iconst.i64 18
v19 = iconst.i64 19
v20 = iconst.i64 20
v21 = iconst.i64 21
v22 = iconst.i64 22
v23 = iconst.i64 23
v24 = iconst.i64 24
v25 = iconst.i64 25
v26 = iconst.i64 26
v27 = iconst.i64 27
v28 = iconst.i64 28
v29 = iconst.i64 29
v30 = iconst.i64 30
v31 = iconst.i64 31
v32 = iconst.i64 32
v33 = iconst.i64 33
v34 = iconst.i64 34
v35 = iconst.i64 35
v36 = iconst.i64 36
v37 = iconst.i64 37
v38 = iconst.i64 38
v39 = iconst.i64 39
v40 = iconst.i64 30
v41 = iconst.i64 31
v42 = iconst.i64 32
v43 = iconst.i64 33
v44 = iconst.i64 34
v45 = iconst.i64 35
v46 = iconst.i64 36
v47 = iconst.i64 37
v48 = iconst.i64 38
v49 = iconst.i64 39
v50 = iconst.i64 30
v51 = iconst.i64 31
v52 = iconst.i64 32
v53 = iconst.i64 33
v54 = iconst.i64 34
v55 = iconst.i64 35
v56 = iconst.i64 36
v57 = iconst.i64 37
v58 = iconst.i64 38
v59 = iconst.i64 39
v60 = iconst.i64 30
v61 = iconst.i64 31
v62 = iconst.i64 32
v63 = iconst.i64 33
v64 = iconst.i64 34
v65 = iconst.i64 35
v66 = iconst.i64 36
v67 = iconst.i64 37
v68 = iconst.i64 38
v69 = iconst.i64 39

v70 = iadd.i64 v1, v2
v71 = iadd.i64 v3, v4
v72 = iadd.i64 v5, v6
v73 = iadd.i64 v7, v8
v74 = iadd.i64 v9, v10
v75 = iadd.i64 v11, v12
v76 = iadd.i64 v13, v14
v77 = iadd.i64 v15, v16
v78 = iadd.i64 v17, v18
v79 = iadd.i64 v19, v20
v80 = iadd.i64 v21, v22
v81 = iadd.i64 v23, v24
v82 = iadd.i64 v25, v26
v83 = iadd.i64 v27, v28
v84 = iadd.i64 v29, v30
v85 = iadd.i64 v31, v32
v86 = iadd.i64 v33, v34
v87 = iadd.i64 v35, v36
v88 = iadd.i64 v37, v38
v89 = iadd.i64 v39, v40
v90 = iadd.i64 v41, v42
v91 = iadd.i64 v43, v44
v92 = iadd.i64 v45, v46
v93 = iadd.i64 v47, v48
v94 = iadd.i64 v49, v50
v95 = iadd.i64 v51, v52
v96 = iadd.i64 v53, v54
v97 = iadd.i64 v55, v56
v98 = iadd.i64 v57, v58
v99 = iadd.i64 v59, v60
v100 = iadd.i64 v61, v62
v101 = iadd.i64 v63, v64
v102 = iadd.i64 v65, v66
v103 = iadd.i64 v67, v68

v104 = iadd.i64 v69, v70
v105 = iadd.i64 v71, v72
v106 = iadd.i64 v73, v74
v107 = iadd.i64 v75, v76
v108 = iadd.i64 v77, v78
v109 = iadd.i64 v79, v80
v110 = iadd.i64 v81, v82
v111 = iadd.i64 v83, v84
v112 = iadd.i64 v85, v86
v113 = iadd.i64 v87, v88
v114 = iadd.i64 v89, v90
v115 = iadd.i64 v91, v92
v116 = iadd.i64 v93, v94
v117 = iadd.i64 v95, v96
v118 = iadd.i64 v97, v98
v119 = iadd.i64 v99, v100
v120 = iadd.i64 v101, v102

v121 = iadd.i64 v103, v104
v122 = iadd.i64 v105, v106
v123 = iadd.i64 v107, v108
v124 = iadd.i64 v109, v110
v125 = iadd.i64 v111, v112
v126 = iadd.i64 v113, v114
v127 = iadd.i64 v115, v116
v128 = iadd.i64 v117, v118
v129 = iadd.i64 v119, v120

v130 = iadd.i64 v121, v122
v131 = iadd.i64 v123, v124
v132 = iadd.i64 v125, v126
v133 = iadd.i64 v127, v128

v134 = iadd.i64 v129, v130
v135 = iadd.i64 v131, v132

v136 = iadd.i64 v133, v134
v137 = iadd.i64 v135, v136

return v0, v137
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; sd s1,-8(sp)
; sd s2,-16(sp)
; sd s3,-24(sp)
; sd s4,-32(sp)
; sd s5,-40(sp)
; sd s6,-48(sp)
; sd s7,-56(sp)
; sd s8,-64(sp)
; sd s9,-72(sp)
; sd s10,-80(sp)
; sd s11,-88(sp)
; add sp,-1360
; block0:
; sd a0,1000(nominal_sp)
; li t0,2
; sd t0,1008(nominal_sp)
; li t0,4
; li t1,6
; li t2,8
; li a1,10
; li a2,12
; li a3,14
; li a4,16
; li a5,18
; li a6,20
; li a7,22
; li t3,24
; li s5,26
; li s6,28
; li s7,30
; li s8,32
; li s9,34
; li s10,36
; li s11,38
; li s1,30
; li s2,32
; li s3,34
; li s4,36
; li a0,38
; li t4,30
; sd t4,1256(nominal_sp)
; li t4,32
; sd t4,1248(nominal_sp)
; li t4,34
; sd t4,1240(nominal_sp)
; li t4,36
; sd t4,1232(nominal_sp)
; li t4,38
; sd t4,1224(nominal_sp)
; li t4,30
; sd t4,1216(nominal_sp)
; li t4,32
; sd t4,1208(nominal_sp)
; li t4,34
; sd t4,1200(nominal_sp)
; li t4,36
; sd t4,1192(nominal_sp)
; li t4,38
; sd t4,1184(nominal_sp)
; ld t4,1008(nominal_sp)
; addi t4,t4,1
; sd t4,1176(nominal_sp)
; addi t4,t0,3
; sd t4,1168(nominal_sp)
; addi t4,t1,5
; sd t4,1160(nominal_sp)
; addi t4,t2,7
; sd t4,1152(nominal_sp)
; addi t4,a1,9
; sd t4,1144(nominal_sp)
; addi t4,a2,11
; sd t4,1136(nominal_sp)
; addi t4,a3,13
; sd t4,1128(nominal_sp)
; addi t4,a4,15
; sd t4,1120(nominal_sp)
; addi t4,a5,17
; sd t4,1112(nominal_sp)
; addi t4,a6,19
; sd t4,1104(nominal_sp)
; addi t4,a7,21
; sd t4,1096(nominal_sp)
; addi t4,t3,23
; sd t4,1088(nominal_sp)
; addi t4,s5,25
; sd t4,1080(nominal_sp)
; addi t4,s6,27
; sd t4,1072(nominal_sp)
; addi t4,s7,29
; sd t4,1064(nominal_sp)
; addi t4,s8,31
; sd t4,1056(nominal_sp)
; addi t4,s9,33
; sd t4,1048(nominal_sp)
; addi t4,s10,35
; sd t4,1040(nominal_sp)
; addi t4,s11,37
; sd t4,1032(nominal_sp)
; addi t4,s1,39
; sd t4,1024(nominal_sp)
; addi t4,s2,31
; sd t4,1016(nominal_sp)
; addi t4,s3,33
; sd t4,1008(nominal_sp)
; addi s4,s4,35
; addi a0,a0,37
; ld t4,1256(nominal_sp)
; addi t4,t4,39
; ld t3,1248(nominal_sp)
; addi t0,t3,31
; ld t1,1240(nominal_sp)
; addi t1,t1,33
; ld a1,1232(nominal_sp)
; addi t2,a1,35
; ld a4,1224(nominal_sp)
; addi a1,a4,37
; ld a7,1216(nominal_sp)
; addi a2,a7,39
; ld a3,1208(nominal_sp)
; addi a3,a3,31
; ld a4,1200(nominal_sp)
; addi a4,a4,33
; ld a5,1192(nominal_sp)
; addi a5,a5,35
; ld a6,1184(nominal_sp)
; addi a6,a6,37
; ld a7,1176(nominal_sp)
; addi a7,a7,39
; ld t3,1160(nominal_sp)
; ld s11,1168(nominal_sp)
; add t3,s11,t3
; ld s7,1144(nominal_sp)
; ld s5,1152(nominal_sp)
; add s5,s5,s7
; ld s1,1128(nominal_sp)
; ld s10,1136(nominal_sp)
; add s6,s10,s1
; ld s7,1112(nominal_sp)
; ld s8,1120(nominal_sp)
; add s7,s8,s7
; ld s11,1096(nominal_sp)
; ld s9,1104(nominal_sp)
; add s8,s9,s11
; ld s9,1080(nominal_sp)
; ld s3,1088(nominal_sp)
; add s9,s3,s9
; ld s10,1064(nominal_sp)
; ld s11,1072(nominal_sp)
; add s10,s11,s10
; ld s11,1048(nominal_sp)
; ld s2,1056(nominal_sp)
; add s11,s2,s11
; ld s1,1032(nominal_sp)
; ld s2,1040(nominal_sp)
; add s1,s2,s1
; ld s3,1016(nominal_sp)
; ld s2,1024(nominal_sp)
; add s2,s2,s3
; ld s3,1008(nominal_sp)
; add s3,s3,s4
; add t4,a0,t4
; add t0,t0,t1
; add t1,t2,a1
; add t2,a2,a3
; add a0,a4,a5
; add a1,a6,a7
; add a2,t3,s5
; add a3,s6,s7
; add a4,s8,s9
; add a5,s10,s11
; add a6,s1,s2
; add t4,s3,t4
; add t0,t0,t1
; add t1,t2,a0
; add t2,a1,a2
; add a0,a3,a4
; add a1,a5,a6
; add t4,t4,t0
; add t0,t1,t2
; add t1,a0,a1
; add t4,t4,t0
; add a1,t1,t4
; ld a0,1000(nominal_sp)
; add sp,+1360
; ld s1,-8(sp)
; ld s2,-16(sp)
; ld s3,-24(sp)
; ld s4,-32(sp)
; ld s5,-40(sp)
; ld s6,-48(sp)
; ld s7,-56(sp)
; ld s8,-64(sp)
; ld s9,-72(sp)
; ld s10,-80(sp)
; ld s11,-88(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; sd s1, -8(sp)
; sd s2, -0x10(sp)
; sd s3, -0x18(sp)
; sd s4, -0x20(sp)
; sd s5, -0x28(sp)
; sd s6, -0x30(sp)
; sd s7, -0x38(sp)
; sd s8, -0x40(sp)
; sd s9, -0x48(sp)
; sd s10, -0x50(sp)
; sd s11, -0x58(sp)
; addi sp, sp, -0x550
; block1: ; offset 0x40
; sd a0, 0x3e8(sp)
; addi t0, zero, 2
; sd t0, 0x3f0(sp)
; addi t0, zero, 4
; addi t1, zero, 6
; addi t2, zero, 8
; addi a1, zero, 0xa
; addi a2, zero, 0xc
; addi a3, zero, 0xe
; addi a4, zero, 0x10
; addi a5, zero, 0x12
; addi a6, zero, 0x14
; addi a7, zero, 0x16
; addi t3, zero, 0x18
; addi s5, zero, 0x1a
; addi s6, zero, 0x1c
; addi s7, zero, 0x1e
; addi s8, zero, 0x20
; addi s9, zero, 0x22
; addi s10, zero, 0x24
; addi s11, zero, 0x26
; addi s1, zero, 0x1e
; addi s2, zero, 0x20
; addi s3, zero, 0x22
; addi s4, zero, 0x24
; addi a0, zero, 0x26
; addi t4, zero, 0x1e
; sd t4, 0x4e8(sp)
; addi t4, zero, 0x20
; sd t4, 0x4e0(sp)
; addi t4, zero, 0x22
; sd t4, 0x4d8(sp)
; addi t4, zero, 0x24
; sd t4, 0x4d0(sp)
; addi t4, zero, 0x26
; sd t4, 0x4c8(sp)
; addi t4, zero, 0x1e
; sd t4, 0x4c0(sp)
; addi t4, zero, 0x20
; sd t4, 0x4b8(sp)
; addi t4, zero, 0x22
; sd t4, 0x4b0(sp)
; addi t4, zero, 0x24
; sd t4, 0x4a8(sp)
; addi t4, zero, 0x26
; sd t4, 0x4a0(sp)
; ld t4, 0x3f0(sp)
; addi t4, t4, 1
; sd t4, 0x498(sp)
; addi t4, t0, 3
; sd t4, 0x490(sp)
; addi t4, t1, 5
; sd t4, 0x488(sp)
; addi t4, t2, 7
; sd t4, 0x480(sp)
; addi t4, a1, 9
; sd t4, 0x478(sp)
; addi t4, a2, 0xb
; sd t4, 0x470(sp)
; addi t4, a3, 0xd
; sd t4, 0x468(sp)
; addi t4, a4, 0xf
; sd t4, 0x460(sp)
; addi t4, a5, 0x11
; sd t4, 0x458(sp)
; addi t4, a6, 0x13
; sd t4, 0x450(sp)
; addi t4, a7, 0x15
; sd t4, 0x448(sp)
; addi t4, t3, 0x17
; sd t4, 0x440(sp)
; addi t4, s5, 0x19
; sd t4, 0x438(sp)
; addi t4, s6, 0x1b
; sd t4, 0x430(sp)
; addi t4, s7, 0x1d
; sd t4, 0x428(sp)
; addi t4, s8, 0x1f
; sd t4, 0x420(sp)
; addi t4, s9, 0x21
; sd t4, 0x418(sp)
; addi t4, s10, 0x23
; sd t4, 0x410(sp)
; addi t4, s11, 0x25
; sd t4, 0x408(sp)
; addi t4, s1, 0x27
; sd t4, 0x400(sp)
; addi t4, s2, 0x1f
; sd t4, 0x3f8(sp)
; addi t4, s3, 0x21
; sd t4, 0x3f0(sp)
; addi s4, s4, 0x23
; addi a0, a0, 0x25
; ld t4, 0x4e8(sp)
; addi t4, t4, 0x27
; ld t3, 0x4e0(sp)
; addi t0, t3, 0x1f
; ld t1, 0x4d8(sp)
; addi t1, t1, 0x21
; ld a1, 0x4d0(sp)
; addi t2, a1, 0x23
; ld a4, 0x4c8(sp)
; addi a1, a4, 0x25
; ld a7, 0x4c0(sp)
; addi a2, a7, 0x27
; ld a3, 0x4b8(sp)
; addi a3, a3, 0x1f
; ld a4, 0x4b0(sp)
; addi a4, a4, 0x21
; ld a5, 0x4a8(sp)
; addi a5, a5, 0x23
; ld a6, 0x4a0(sp)
; addi a6, a6, 0x25
; ld a7, 0x498(sp)
; addi a7, a7, 0x27
; ld t3, 0x488(sp)
; ld s11, 0x490(sp)
; add t3, s11, t3
; ld s7, 0x478(sp)
; ld s5, 0x480(sp)
; add s5, s5, s7
; ld s1, 0x468(sp)
; ld s10, 0x470(sp)
; add s6, s10, s1
; ld s7, 0x458(sp)
; ld s8, 0x460(sp)
; add s7, s8, s7
; ld s11, 0x448(sp)
; ld s9, 0x450(sp)
; add s8, s9, s11
; ld s9, 0x438(sp)
; ld s3, 0x440(sp)
; add s9, s3, s9
; ld s10, 0x428(sp)
; ld s11, 0x430(sp)
; add s10, s11, s10
; ld s11, 0x418(sp)
; ld s2, 0x420(sp)
; add s11, s2, s11
; ld s1, 0x408(sp)
; ld s2, 0x410(sp)
; add s1, s2, s1
; ld s3, 0x3f8(sp)
; ld s2, 0x400(sp)
; add s2, s2, s3
; ld s3, 0x3f0(sp)
; add s3, s3, s4
; add t4, a0, t4
; add t0, t0, t1
; add t1, t2, a1
; add t2, a2, a3
; add a0, a4, a5
; add a1, a6, a7
; add a2, t3, s5
; add a3, s6, s7
; add a4, s8, s9
; add a5, s10, s11
; add a6, s1, s2
; add t4, s3, t4
; add t0, t0, t1
; add t1, t2, a0
; add t2, a1, a2
; add a0, a3, a4
; add a1, a5, a6
; add t4, t4, t0
; add t0, t1, t2
; add t1, a0, a1
; add t4, t4, t0
; add a1, t1, t4
; ld a0, 0x3e8(sp)
; addi sp, sp, 0x550
; ld s1, -8(sp)
; ld s2, -0x10(sp)
; ld s3, -0x18(sp)
; ld s4, -0x20(sp)
; ld s5, -0x28(sp)
; ld s6, -0x30(sp)
; ld s7, -0x38(sp)
; ld s8, -0x40(sp)
; ld s9, -0x48(sp)
; ld s10, -0x50(sp)
; ld s11, -0x58(sp)
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_store(i128) {
ss0 = explicit_slot 16

block0(v0: i128):
stack_store.i128 v0, ss0
return
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; mv a2,a0
; load_addr a0,nsp+0
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x10
; block1: ; offset 0x14
; ori a2, a0, 0
; mv a0, sp
; sd a2, 0(a0)
; sd a1, 8(a0)
; addi sp, sp, 0x10
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_store_inst_offset(i128) {
ss0 = explicit_slot 16
ss1 = explicit_slot 16

block0(v0: i128):
stack_store.i128 v0, ss1+16
return
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-32
; block0:
; mv a2,a0
; load_addr a0,nsp+32
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x20
; block1: ; offset 0x14
; ori a2, a0, 0
; addi a0, sp, 0x20
; sd a2, 0(a0)
; sd a1, 8(a0)
; addi sp, sp, 0x20
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_store_big(i128) {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8

block0(v0: i128):
stack_store.i128 v0, ss0
return
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; mv a2,a0
; load_addr a0,nsp+0
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; lui a0, 0x18
; addi a0, a0, 0x6b0
; auipc t5, 0
; ld t5, 0xc(t5)
; j 0xc
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %Probestack 0
; .byte 0x00, 0x00, 0x00, 0x00
; jalr t5
; lui t6, 0xfffe8
; addi t6, t6, -0x6b0
; add sp, t6, sp
; block1: ; offset 0x3c
; ori a2, a0, 0
; mv a0, sp
; sd a2, 0(a0)
; sd a1, 8(a0)
; lui t6, 0x18
; addi t6, t6, 0x6b0
; add sp, t6, sp
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_load() -> i128 {
ss0 = explicit_slot 16

block0:
v0 = stack_load.i128 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr t2,nsp+0
; ld a0,0(t2)
; ld a1,8(t2)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x10
; block1: ; offset 0x14
; mv t2, sp
; ld a0, 0(t2)
; ld a1, 8(t2)
; addi sp, sp, 0x10
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_load_inst_offset() -> i128 {
ss0 = explicit_slot 16
ss1 = explicit_slot 16

block0:
v0 = stack_load.i128 ss1+16
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-32
; block0:
; load_addr t2,nsp+32
; ld a0,0(t2)
; ld a1,8(t2)
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; addi sp, sp, -0x20
; block1: ; offset 0x14
; addi t2, sp, 0x20
; ld a0, 0(t2)
; ld a1, 8(t2)
; addi sp, sp, 0x20
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret

function %i128_stack_load_big() -> i128 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8

block0:
v0 = stack_load.i128 ss0
return v0
}

; VCode:
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr t2,nsp+0
; ld a0,0(t2)
; ld a1,8(t2)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; addi sp, sp, -0x10
; sd ra, 8(sp)
; sd s0, 0(sp)
; ori s0, sp, 0
; lui a0, 0x18
; addi a0, a0, 0x6b0
; auipc t5, 0
; ld t5, 0xc(t5)
; j 0xc
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %Probestack 0
; .byte 0x00, 0x00, 0x00, 0x00
; jalr t5
; lui t6, 0xfffe8
; addi t6, t6, -0x6b0
; add sp, t6, sp
; block1: ; offset 0x3c
; mv t2, sp
; ld a0, 0(t2)
; ld a1, 8(t2)
; lui t6, 0x18
; addi t6, t6, 0x6b0
; add sp, t6, sp
; ld ra, 8(sp)
; ld s0, 0(sp)
; addi sp, sp, 0x10
; ret