* Cranelift: remove non-egraphs optimization pipeline and `use_egraphs` option. This PR removes the LICM, GVN, and preopt passes, and associated support pieces, from `cranelift-codegen`. Not to worry, we still have optimizations: the egraph framework subsumes all of these, and has been on by default since #5181. A few decision points: - Filetests for the legacy LICM, GVN and simple_preopt were removed too. As we built optimizations in the egraph framework we wrote new tests for the equivalent functionality, and many of the old tests were testing specific behaviors in the old implementations that may not be relevant anymore. However if folks prefer I could take a different approach here and try to port over all of the tests. - The corresponding filetest modes (commands) were deleted too. The `test alias_analysis` mode remains, but no longer invokes a separate GVN first (since there is no separate GVN that will not also do alias analysis) so the tests were tweaked slightly to work with that. The egraph testsuite also covers alias analysis. - The `divconst_magic_numbers` module is removed since it's unused without `simple_preopt`, though this is the one remaining optimization we still need to build in the egraphs framework, pending #5908. The magic numbers will live forever in git history so removing this in the meantime is not a major issue IMHO. - The `use_egraphs` setting itself was removed at both the Cranelift and Wasmtime levels. It has been marked deprecated for a few releases now (Wasmtime 6.0, 7.0, upcoming 8.0, and corresponding Cranelift versions) so I think this is probably OK. As an alternative if anyone feels strongly, we could leave the setting and make it a no-op. * Update test outputs for remaining test differences.
83 lines
1.1 KiB
Plaintext
test compile precise-output
|
|
set unwind_info=false
|
|
target aarch64
|
|
|
|
function %f1() -> i64x2 {
|
|
block0:
|
|
v0 = iconst.i64 281474976710657
|
|
v1 = scalar_to_vector.i64x2 v0
|
|
return v1
|
|
}
|
|
|
|
; VCode:
|
|
; block0:
|
|
; movz x2, #1
|
|
; movk x2, x2, #1, LSL #48
|
|
; fmov d0, x2
|
|
; ret
|
|
;
|
|
; Disassembled:
|
|
; block0: ; offset 0x0
|
|
; mov x2, #1
|
|
; movk x2, #1, lsl #48
|
|
; fmov d0, x2
|
|
; ret
|
|
|
|
function %f2() -> i32x4 {
|
|
block0:
|
|
v0 = iconst.i32 42679
|
|
v1 = scalar_to_vector.i32x4 v0
|
|
return v1
|
|
}
|
|
|
|
; VCode:
|
|
; block0:
|
|
; movz w1, #42679
|
|
; fmov s0, w1
|
|
; ret
|
|
;
|
|
; Disassembled:
|
|
; block0: ; offset 0x0
|
|
; mov w1, #0xa6b7
|
|
; fmov s0, w1
|
|
; ret
|
|
|
|
function %f3() -> f32x4 {
|
|
block0:
|
|
v0 = f32const 0x1.0
|
|
v1 = scalar_to_vector.f32x4 v0
|
|
return v1
|
|
}
|
|
|
|
; VCode:
|
|
; block0:
|
|
; fmov s1, #1
|
|
; fmov s0, s1
|
|
; ret
|
|
;
|
|
; Disassembled:
|
|
; block0: ; offset 0x0
|
|
; fmov s1, #1.00000000
|
|
; fmov s0, s1
|
|
; ret
|
|
|
|
function %f4() -> f64x2 {
|
|
block0:
|
|
v0 = f64const 0x1.0
|
|
v1 = scalar_to_vector.f64x2 v0
|
|
return v1
|
|
}
|
|
|
|
; VCode:
|
|
; block0:
|
|
; fmov d1, #1
|
|
; fmov d0, d1
|
|
; ret
|
|
;
|
|
; Disassembled:
|
|
; block0: ; offset 0x0
|
|
; fmov d1, #1.00000000
|
|
; fmov d0, d1
|
|
; ret
|
|
|