* Cranelift: remove non-egraphs optimization pipeline and `use_egraphs` option. This PR removes the LICM, GVN, and preopt passes, and their associated support pieces, from `cranelift-codegen`. Not to worry, we still have optimizations: the egraph framework subsumes all of these, and has been on by default since #5181. A few decision points:
  - Filetests for the legacy LICM, GVN, and simple_preopt passes were removed too. As we built optimizations in the egraph framework we wrote new tests for the equivalent functionality (see the sketch after this list for the general shape), and many of the old tests exercised specific behaviors of the old implementations that may no longer be relevant. However, if folks prefer, I could take a different approach here and try to port over all of the tests.
  - The corresponding filetest modes (commands) were deleted too. The `test alias_analysis` mode remains, but no longer invokes a separate GVN pass first (there is no longer a separate GVN that does not also perform alias analysis), so those tests were tweaked slightly to work with that. The egraph testsuite also covers alias analysis.
  - The `divconst_magic_numbers` module is removed since it is unused without `simple_preopt`, though division-by-constant is the one remaining optimization we still need to build in the egraph framework, pending #5908. The magic numbers will live forever in git history, so removing them in the meantime is not a major issue IMHO.
  - The `use_egraphs` setting itself was removed at both the Cranelift and Wasmtime levels. It has been marked deprecated for a few releases now (Wasmtime 6.0, 7.0, the upcoming 8.0, and the corresponding Cranelift versions), so I think this is probably OK. As an alternative, if anyone feels strongly, we could leave the setting in place and make it a no-op.
* Update test outputs for remaining test differences.
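For readers who have not looked at the egraph-based tests, here is a minimal, hypothetical sketch of the general shape of a GVN-style check in that framework. It assumes the `test optimize` filetest mode used by the egraph suite; the function name, target choice, and expected behavior noted in the comments are illustrative, not copied from the ported tests:

```
test optimize
set opt_level=speed
target x86_64

function %redundant_add(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = iadd v0, v1
    v3 = iadd v0, v1   ; same expression as v2: the egraph unions the two nodes,
                       ; so only one iadd survives elaboration (the old GVN effect)
    v4 = iadd v2, v3
    return v4
}
```

Roughly speaking, LICM falls out of the same machinery: elaboration places pure nodes at their least-deeply-nested loop position, so no separate hoisting pass is needed. The attached filetest below is one of the precise-output expectations updated by the second commit.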
test compile precise-output
set unwind_info=false
set enable_probestack=false
target aarch64

function %f1(i64) -> i64 {
    fn0 = %g(i64) -> i64

block0(v0: i64):
    v1 = call fn0(v0)
    return v1
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; load_ext_name x3, TestCase(%g)+0
; blr x3
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; ldr x3, #0x10
; b #0x18
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x3
; ldp x29, x30, [sp], #0x10
; ret

function %f2(i32) -> i64 {
    fn0 = %g(i32 uext) -> i64

block0(v0: i32):
    v1 = call fn0(v0)
    return v1
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; load_ext_name x3, TestCase(%g)+0
; blr x3
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; ldr x3, #0x10
; b #0x18
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x3
; ldp x29, x30, [sp], #0x10
; ret

function %f3(i32) -> i32 uext {
block0(v0: i32):
    return v0
}

; VCode:
; block0:
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ret

function %f4(i32) -> i64 {
    fn0 = %g(i32 sext) -> i64

block0(v0: i32):
    v1 = call fn0(v0)
    return v1
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; load_ext_name x3, TestCase(%g)+0
; blr x3
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; ldr x3, #0x10
; b #0x18
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x3
; ldp x29, x30, [sp], #0x10
; ret

function %f5(i32) -> i32 sext {
block0(v0: i32):
    return v0
}

; VCode:
; block0:
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ret

function %f6(i8) -> i64 {
    fn0 = %g(i32, i32, i32, i32, i32, i32, i32, i32, i8 sext) -> i64

block0(v0: i8):
    v1 = iconst.i32 42
    v2 = call fn0(v1, v1, v1, v1, v1, v1, v1, v1, v0)
    return v2
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; movz w7, #42
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; strb w0, [sp]
; load_ext_name x8, TestCase(%g)+0
; mov x0, x7
; mov x1, x7
; mov x2, x7
; mov x3, x7
; mov x4, x7
; mov x5, x7
; mov x6, x7
; blr x8
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov w7, #0x2a
; sub sp, sp, #0x10
; sturb w0, [sp]
; ldr x8, #0x1c
; b #0x24
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x7
; mov x1, x7
; mov x2, x7
; mov x3, x7
; mov x4, x7
; mov x5, x7
; mov x6, x7
; blr x8
; add sp, sp, #0x10
; ldp x29, x30, [sp], #0x10
; ret

function %f7(i8) -> i32, i32, i32, i32, i32, i32, i32, i32, i8 sext {
block0(v0: i8):
    v1 = iconst.i32 42
    return v1, v1, v1, v1, v1, v1, v1, v1, v0
}

; VCode:
; block0:
; movz w7, #42
; strb w0, [x1]
; mov x0, x7
; mov x1, x7
; mov x2, x7
; mov x3, x7
; mov x4, x7
; mov x5, x7
; mov x6, x7
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov w7, #0x2a
; sturb w0, [x1]
; mov x0, x7
; mov x1, x7
; mov x2, x7
; mov x3, x7
; mov x4, x7
; mov x5, x7
; mov x6, x7
; ret

function %f8() {
    fn0 = %g0() -> f32
    fn1 = %g1() -> f64
    fn2 = %g2()
    fn3 = %g3(f32)
    fn4 = %g4(f64)

block0:
    v0 = call fn0()
    v1 = call fn1()
    v2 = call fn1()
    call fn2()
    call fn3(v0)
    call fn4(v1)
    call fn4(v2)
    return
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; load_ext_name x9, TestCase(%g0)+0
; blr x9
; str q0, [sp, #32]
; load_ext_name x9, TestCase(%g1)+0
; blr x9
; str q0, [sp, #16]
; load_ext_name x9, TestCase(%g1)+0
; blr x9
; str q0, [sp]
; load_ext_name x9, TestCase(%g2)+0
; blr x9
; load_ext_name x10, TestCase(%g3)+0
; ldr q0, [sp, #32]
; blr x10
; load_ext_name x11, TestCase(%g4)+0
; ldr q0, [sp, #16]
; blr x11
; load_ext_name x12, TestCase(%g4)+0
; ldr q0, [sp]
; blr x12
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; sub sp, sp, #0x30
; block1: ; offset 0xc
; ldr x9, #0x14
; b #0x1c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g0 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x20]
; ldr x9, #0x2c
; b #0x34
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g1 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x10]
; ldr x9, #0x44
; b #0x4c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g1 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp]
; ldr x9, #0x5c
; b #0x64
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g2 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; ldr x10, #0x70
; b #0x78
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g3 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x20]
; blr x10
; ldr x11, #0x88
; b #0x90
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g4 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x10]
; blr x11
; ldr x12, #0xa0
; b #0xa8
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g4 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp]
; blr x12
; add sp, sp, #0x30
; ldp x29, x30, [sp], #0x10
; ret

function %f9() {
    fn0 = %g0() -> i8x16
    fn1 = %g1()
    fn2 = %g2(i8x16)

block0:
    v0 = call fn0()
    v1 = call fn0()
    v2 = call fn0()
    call fn1()
    call fn2(v0)
    call fn2(v1)
    call fn2(v2)
    return
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; load_ext_name x9, TestCase(%g0)+0
; blr x9
; str q0, [sp, #32]
; load_ext_name x9, TestCase(%g0)+0
; blr x9
; str q0, [sp, #16]
; load_ext_name x9, TestCase(%g0)+0
; blr x9
; str q0, [sp]
; load_ext_name x9, TestCase(%g1)+0
; blr x9
; load_ext_name x10, TestCase(%g2)+0
; ldr q0, [sp, #32]
; blr x10
; load_ext_name x11, TestCase(%g2)+0
; ldr q0, [sp, #16]
; blr x11
; load_ext_name x12, TestCase(%g2)+0
; ldr q0, [sp]
; blr x12
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; sub sp, sp, #0x30
; block1: ; offset 0xc
; ldr x9, #0x14
; b #0x1c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g0 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x20]
; ldr x9, #0x2c
; b #0x34
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g0 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x10]
; ldr x9, #0x44
; b #0x4c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g0 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp]
; ldr x9, #0x5c
; b #0x64
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g1 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; ldr x10, #0x70
; b #0x78
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g2 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x20]
; blr x10
; ldr x11, #0x88
; b #0x90
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g2 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x10]
; blr x11
; ldr x12, #0xa0
; b #0xa8
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g2 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp]
; blr x12
; add sp, sp, #0x30
; ldp x29, x30, [sp], #0x10
; ret

function %f10() {
    fn0 = %g0() -> f32
    fn1 = %g1() -> f64
    fn2 = %g2() -> i8x16
    fn3 = %g3()
    fn4 = %g4(f32)
    fn5 = %g5(f64)
    fn6 = %g6(i8x16)

block0:
    v0 = call fn0()
    v1 = call fn1()
    v2 = call fn2()
    call fn3()
    call fn4(v0)
    call fn5(v1)
    call fn6(v2)
    return
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; load_ext_name x9, TestCase(%g0)+0
; blr x9
; str q0, [sp, #32]
; load_ext_name x9, TestCase(%g1)+0
; blr x9
; str q0, [sp, #16]
; load_ext_name x9, TestCase(%g2)+0
; blr x9
; str q0, [sp]
; load_ext_name x9, TestCase(%g3)+0
; blr x9
; load_ext_name x10, TestCase(%g4)+0
; ldr q0, [sp, #32]
; blr x10
; load_ext_name x11, TestCase(%g5)+0
; ldr q0, [sp, #16]
; blr x11
; load_ext_name x12, TestCase(%g6)+0
; ldr q0, [sp]
; blr x12
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; sub sp, sp, #0x30
; block1: ; offset 0xc
; ldr x9, #0x14
; b #0x1c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g0 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x20]
; ldr x9, #0x2c
; b #0x34
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g1 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp, #0x10]
; ldr x9, #0x44
; b #0x4c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g2 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; stur q0, [sp]
; ldr x9, #0x5c
; b #0x64
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g3 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x9
; ldr x10, #0x70
; b #0x78
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g4 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x20]
; blr x10
; ldr x11, #0x88
; b #0x90
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g5 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp, #0x10]
; blr x11
; ldr x12, #0xa0
; b #0xa8
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g6 0
; .byte 0x00, 0x00, 0x00, 0x00
; ldur q0, [sp]
; blr x12
; add sp, sp, #0x30
; ldp x29, x30, [sp], #0x10
; ret

function %f11(i128, i64) -> i64 {
block0(v0: i128, v1: i64):
    v2, v3 = isplit v0
    return v3
}

; VCode:
; block0:
; mov x0, x1
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov x0, x1
; ret

function %f11_call(i64) -> i64 {
    fn0 = %f11(i128, i64) -> i64

block0(v0: i64):
    v1 = iconst.i64 42
    v2 = iconcat v1, v0
    v3 = call fn0(v2, v1)
    return v3
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x1, x0
; movz x2, #42
; load_ext_name x4, TestCase(%f11)+0
; mov x0, x2
; blr x4
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x1, x0
; mov x2, #0x2a
; ldr x4, #0x18
; b #0x20
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %f11 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x2
; blr x4
; ldp x29, x30, [sp], #0x10
; ret

function %f12(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2, v3 = isplit v1
    return v2
}

; VCode:
; block0:
; mov x0, x2
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov x0, x2
; ret

function %f12_call(i64) -> i64 {
    fn0 = %f12(i64, i128) -> i64

block0(v0: i64):
    v1 = iconst.i64 42
    v2 = iconcat v0, v1
    v3 = call fn0(v1, v2)
    return v3
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x2, x0
; movz x3, #42
; load_ext_name x4, TestCase(%f12)+0
; mov x0, x3
; blr x4
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x2, x0
; mov x3, #0x2a
; ldr x4, #0x18
; b #0x20
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %f12 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x3
; blr x4
; ldp x29, x30, [sp], #0x10
; ret

function %f13(i64, i128) -> i64 apple_aarch64 {
block0(v0: i64, v1: i128):
    v2, v3 = isplit v1
    return v2
}

; VCode:
; block0:
; mov x0, x1
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov x0, x1
; ret

function %f13_call(i64) -> i64 apple_aarch64 {
    fn0 = %f13(i64, i128) -> i64 apple_aarch64

block0(v0: i64):
    v1 = iconst.i64 42
    v2 = iconcat v0, v1
    v3 = call fn0(v1, v2)
    return v3
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x1, x0
; movz x2, #42
; load_ext_name x4, TestCase(%f13)+0
; mov x0, x2
; blr x4
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x1, x0
; mov x2, #0x2a
; ldr x4, #0x18
; b #0x20
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %f13 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x2
; blr x4
; ldp x29, x30, [sp], #0x10
; ret

function %f14(i128, i128, i128, i64, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
    return v4
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; ldr x0, [fp, #16]
; ldr x1, [fp, #24]
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; ldur x0, [x29, #0x10]
; ldur x1, [x29, #0x18]
; ldp x29, x30, [sp], #0x10
; ret

function %f14_call(i128, i64) -> i128 {
    fn0 = %f14(i128, i128, i128, i64, i128) -> i128

block0(v0: i128, v1: i64):
    v2 = call fn0(v0, v0, v0, v1, v0)
    return v2
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x6, x2
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; str x0, [sp]
; mov x4, x0
; str x1, [sp, #8]
; mov x5, x1
; load_ext_name x10, TestCase(%f14)+0
; mov x0, x4
; mov x2, x4
; mov x1, x5
; mov x3, x5
; blr x10
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x6, x2
; sub sp, sp, #0x10
; stur x0, [sp]
; mov x4, x0
; stur x1, [sp, #8]
; mov x5, x1
; ldr x10, #0x28
; b #0x30
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %f14 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x4
; mov x2, x4
; mov x1, x5
; mov x3, x5
; blr x10
; add sp, sp, #0x10
; ldp x29, x30, [sp], #0x10
; ret

function %f15(i128, i128, i128, i64, i128) -> i128 apple_aarch64 {
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
    return v4
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; ldr x0, [fp, #16]
; ldr x1, [fp, #24]
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; ldur x0, [x29, #0x10]
; ldur x1, [x29, #0x18]
; ldp x29, x30, [sp], #0x10
; ret

function %f15_call(i128, i64) -> i128 apple_aarch64 {
    fn0 = %f15(i128, i128, i128, i64, i128) -> i128 apple_aarch64

block0(v0: i128, v1: i64):
    v2 = call fn0(v0, v0, v0, v1, v0)
    return v2
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x6, x2
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; str x0, [sp]
; mov x4, x0
; str x1, [sp, #8]
; mov x5, x1
; load_ext_name x10, TestCase(%f15)+0
; mov x0, x4
; mov x2, x4
; mov x1, x5
; mov x3, x5
; blr x10
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x6, x2
; sub sp, sp, #0x10
; stur x0, [sp]
; mov x4, x0
; stur x1, [sp, #8]
; mov x5, x1
; ldr x10, #0x28
; b #0x30
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %f15 0
; .byte 0x00, 0x00, 0x00, 0x00
; mov x0, x4
; mov x2, x4
; mov x1, x5
; mov x3, x5
; blr x10
; add sp, sp, #0x10
; ldp x29, x30, [sp], #0x10
; ret

function %f16() -> i32, i32 wasmtime_system_v {
block0:
    v0 = iconst.i32 0
    v1 = iconst.i32 1
    return v0, v1
}

; VCode:
; block0:
; mov x6, x0
; movz w0, #0
; movz w3, #1
; str w3, [x6]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov x6, x0
; mov w0, #0
; mov w3, #1
; stur w3, [x6]
; ret

function %f17(i64 sret) {
block0(v0: i64):
    v1 = iconst.i64 42
    store v1, v0
    return
}

; VCode:
; block0:
; mov x5, x8
; movz x4, #42
; str x4, [x8]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov x5, x8
; mov x4, #0x2a
; str x4, [x8]
; ret

function %f18(i64) -> i64 {
    fn0 = %g(i64 sret) -> i64

block0(v0: i64):
    v1 = call fn0(v0)
    return v1
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x8, x0
; load_ext_name x3, TestCase(%g)+0
; blr x3
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; block1: ; offset 0x8
; mov x8, x0
; ldr x3, #0x14
; b #0x1c
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x3
; ldp x29, x30, [sp], #0x10
; ret

function %f18(i64 sret) {
    fn0 = %g(i64 sret)

block0(v0: i64):
    call fn0(v0)
    return
}

; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x24, [sp, #-16]!
; block0:
; mov x24, x8
; load_ext_name x4, TestCase(%g)+0
; blr x4
; mov x8, x24
; ldr x24, [sp], #16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; str x24, [sp, #-0x10]!
; block1: ; offset 0xc
; mov x24, x8
; ldr x4, #0x18
; b #0x20
; .byte 0x00, 0x00, 0x00, 0x00 ; reloc_external Abs8 %g 0
; .byte 0x00, 0x00, 0x00, 0x00
; blr x4
; mov x8, x24
; ldr x24, [sp], #0x10
; ldp x29, x30, [sp], #0x10
; ret