* aarch64: constant generation cleanup

  Add support for MOVZ and MOVN generation via ISLE. Handle f32const,
  f64const, and nop instructions via ISLE. No longer call
  Inst::gen_constant from lower.rs.

* riscv64: constant generation cleanup

  Handle f32const, f64const, and nop instructions via ISLE.

* s390x: constant generation cleanup

  Fix rule priorities for the "imm" term. Only handle 32-bit stack
  offsets; no longer use load_constant64.

* x64: constant generation cleanup

  No longer call Inst::gen_constant from lower.rs or abi.rs.

* Refactor LowerBackend::lower to return InstOutput

  No longer write to the per-insn output registers; instead, return an
  InstOutput vector of temp registers holding the outputs. This will
  allow calling LowerBackend::lower multiple times for the same
  instruction, e.g. to rematerialize constants.

  When emitting the primary copy of the instruction during lowering,
  writing to the per-insn registers is now done in lower_clif_block. As
  a result, the ISLE lower_common routine is no longer needed. In
  addition, the InsnOutput type and all code related to it can be
  removed as well.

* Refactor IsleContext to hold a LowerBackend reference

  Remove the "triple", "flags", and "isa_flags" fields that are copied
  from LowerBackend to each IsleContext, and instead just hold a
  reference to LowerBackend in IsleContext. This will allow calling
  LowerBackend::lower from within callbacks in src/machinst/isle.rs,
  e.g. to rematerialize constants.

  To avoid having to pass LowerBackend references through multiple
  functions, eliminate the lower_insn_to_regs subroutines in those
  targets that still have them, and inline them into the main lower
  routine. This also eliminates lower_inst.rs on aarch64 and riscv64.
  Replace all accesses to the removed IsleContext fields by going
  through the LowerBackend reference.

* Remove MachInst::gen_constant

  This addresses the problem described in issue
  https://github.com/bytecodealliance/wasmtime/issues/4426 that targets
  currently have to duplicate code to emit constants between the ISLE
  logic and the gen_constant callback.

  After the various cleanups in earlier patches in this series, the only
  remaining user of gen_constant is put_value_in_regs in Lower. This can
  now be removed; instead, constant rematerialization can be performed
  in the put_in_regs ISLE callback by directly calling
  LowerBackend::lower on the instruction defining the constant (using a
  different output register). Since the check for egraph mode is no
  longer performed in put_value_in_regs, the Lower::flags member becomes
  obsolete.

  Care must be taken that other direct callers of Lower::put_value_in_regs
  now handle the fact that no rematerialization is performed anymore.
  All such calls in target code already handle constants themselves. The
  remaining call site in the ISLE gen_call_common helper can be
  redirected to the ISLE put_in_regs callback.

  The existing target implementations of gen_constant are then unused
  and can be removed. (In some targets there may still be further
  opportunities to remove duplication between ISLE and local Rust code;
  this can be left to future patches.)
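To make the last point concrete, below is a minimal, self-contained sketch (in Rust, but deliberately not the real Cranelift API: Reg, Inst, Backend, LowerCtx, ToyBackend and remat_enabled are invented stand-ins) of the control flow the commits describe. Because the backend's lower routine now returns its output registers, a put_in_regs-style callback can rematerialize a constant simply by re-lowering its defining instruction, with no separate gen_constant hook to keep in sync with the ISLE rules.

// A minimal, self-contained sketch of the rematerialization idea from the
// commits above. None of these types are the real Cranelift API; Reg, Inst,
// Backend, LowerCtx and remat_enabled are simplified stand-ins that only
// mirror the described control flow: put_in_regs re-lowers a constant's
// defining instruction instead of going through a separate gen_constant hook.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Reg(u32);

#[derive(Clone, Copy)]
enum Inst {
    Iconst(i64),
    Iadd(usize, usize), // operands are SSA value indices
}

/// Stand-in for LowerBackend::lower: lowering returns the registers holding
/// the results rather than writing to fixed per-insn output registers, so it
/// can be called more than once for the same instruction.
trait Backend {
    fn lower(&self, ctx: &mut LowerCtx, inst: Inst) -> Vec<Reg>;
}

struct LowerCtx {
    next_reg: u32,
    value_regs: Vec<Option<Reg>>, // register already assigned to each SSA value
    defs: Vec<Inst>,              // defining instruction of each SSA value
    remat_enabled: bool,          // e.g. set when the egraph pass wants remat
}

impl LowerCtx {
    fn temp(&mut self) -> Reg {
        let r = Reg(self.next_reg);
        self.next_reg += 1;
        r
    }

    /// put_in_regs-style callback: rematerialize cheap constants by calling
    /// back into Backend::lower on the defining instruction; otherwise reuse
    /// the register the value was originally lowered into.
    fn put_in_regs(&mut self, backend: &dyn Backend, value: usize) -> Reg {
        let def = self.defs[value];
        if self.remat_enabled {
            if let Inst::Iconst(_) = def {
                // One lowering path serves both primary emission and remat:
                // no separate constant-emission code to keep in sync.
                return backend.lower(self, def)[0];
            }
        }
        self.value_regs[value].expect("value must have been lowered already")
    }
}

struct ToyBackend;

impl Backend for ToyBackend {
    fn lower(&self, ctx: &mut LowerCtx, inst: Inst) -> Vec<Reg> {
        // Every call writes into a fresh temporary, so the same constant can
        // be materialized once per use site.
        let dst = ctx.temp();
        match inst {
            Inst::Iconst(n) => println!("movz {:?}, #{}", dst, n),
            Inst::Iadd(a, b) => println!("add {:?}, v{}, v{}", dst, a, b),
        }
        vec![dst]
    }
}

fn main() {
    let backend = ToyBackend;
    let mut ctx = LowerCtx {
        next_reg: 0,
        value_regs: vec![None, None],
        defs: vec![Inst::Iconst(42), Inst::Iadd(0, 0)],
        remat_enabled: true,
    };
    // Two uses of value 0 (the constant 42): with remat enabled, each use gets
    // its own freshly lowered copy instead of sharing one register.
    let r1 = ctx.put_in_regs(&backend, 0);
    let r2 = ctx.put_in_regs(&backend, 0);
    assert_ne!(r1, r2);
}

Under these assumptions, two uses of the same constant each get a fresh copy when rematerialization is requested, which is exactly why having lower return an output-register vector (rather than writing fixed per-insn outputs) matters. The filetest below exercises the aarch64 constant paths that this series moves fully into ISLE.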
test compile precise-output
set unwind_info=false
target aarch64

function %f(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i8, i16, i16) -> i16 {
block0(v0: i8, v1: i16, v2: i16):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i8, i32, i32) -> i32 {
block0(v0: i8, v1: i32, v2: i32):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i8, i64, i64) -> i64 {
block0(v0: i8, v1: i64, v2: i64):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i8, i128, i128) -> i128 {
block0(v0: i8, v1: i128, v2: i128):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxtb w6, w0
; subs wzr, w6, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; ret

function %f(i16, i8, i8) -> i8 {
block0(v0: i16, v1: i8, v2: i8):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i16, i16, i16) -> i16 {
block0(v0: i16, v1: i16, v2: i16):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i16, i32, i32) -> i32 {
block0(v0: i16, v1: i32, v2: i32):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i16, i64, i64) -> i64 {
block0(v0: i16, v1: i64, v2: i64):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; ret

function %f(i16, i128, i128) -> i128 {
block0(v0: i16, v1: i128, v2: i128):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; uxth w6, w0
; subs wzr, w6, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; ret

function %f(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret

function %f(i32, i16, i16) -> i16 {
block0(v0: i32, v1: i16, v2: i16):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret

function %f(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret

function %f(i32, i64, i64) -> i64 {
block0(v0: i32, v1: i64, v2: i64):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret

function %f(i32, i128, i128) -> i128 {
block0(v0: i32, v1: i128, v2: i128):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; ret

function %f(i64, i8, i8) -> i8 {
block0(v0: i64, v1: i8, v2: i8):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; ret

function %f(i64, i16, i16) -> i16 {
block0(v0: i64, v1: i16, v2: i16):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; ret

function %f(i64, i32, i32) -> i32 {
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; ret

function %f(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; ret

function %f(i64, i128, i128) -> i128 {
block0(v0: i64, v1: i128, v2: i128):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; ret

function %f(i128, i8, i8) -> i8 {
block0(v0: i128, v1: i8, v2: i8):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select.i8 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; ret

function %f(i128, i16, i16) -> i16 {
block0(v0: i128, v1: i16, v2: i16):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select.i16 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; ret

function %f(i128, i32, i32) -> i32 {
block0(v0: i128, v1: i32, v2: i32):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select.i32 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; ret

function %f(i128, i64, i64) -> i64 {
block0(v0: i128, v1: i64, v2: i64):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select.i64 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; ret

function %f(i128, i128, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select.i128 v5, v1, v2
return v6
}

; block0:
; movz x9, #42
; movz x11, #0
; subs xzr, x0, x9
; ccmp x1, x11, #nzcv, eq
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; ret

function %f(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i8 v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i8, i16, i16) -> i16 {
block0(v0: i8, v1: i16, v2: i16):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i16 v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i8, i32, i32) -> i32 {
block0(v0: i8, v1: i32, v2: i32):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i32 v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i8, i64, i64) -> i64 {
block0(v0: i8, v1: i64, v2: i64):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i64 v4, v1, v2
return v5
}

; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i8, i128, i128) -> i128 {
block0(v0: i8, v1: i128, v2: i128):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i128 v4, v1, v2
return v5
}

; block0:
; uxtb w6, w0
; subs wzr, w6, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; csdb
; ret

function %f(i16, i8, i8) -> i8 {
block0(v0: i16, v1: i8, v2: i8):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i8 v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i16, i16, i16) -> i16 {
block0(v0: i16, v1: i16, v2: i16):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i16 v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i16, i32, i32) -> i32 {
block0(v0: i16, v1: i32, v2: i32):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i32 v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i16, i64, i64) -> i64 {
block0(v0: i16, v1: i64, v2: i64):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i64 v4, v1, v2
return v5
}

; block0:
; uxth w4, w0
; subs wzr, w4, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i16, i128, i128) -> i128 {
block0(v0: i16, v1: i128, v2: i128):
v3 = iconst.i16 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i128 v4, v1, v2
return v5
}

; block0:
; uxth w6, w0
; subs wzr, w6, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; csdb
; ret

function %f(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i8 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i32, i16, i16) -> i16 {
block0(v0: i32, v1: i16, v2: i16):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i16 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i32 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i32, i64, i64) -> i64 {
block0(v0: i32, v1: i64, v2: i64):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i64 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i32, i128, i128) -> i128 {
block0(v0: i32, v1: i128, v2: i128):
v3 = iconst.i32 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i128 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; csdb
; ret

function %f(i64, i8, i8) -> i8 {
block0(v0: i64, v1: i8, v2: i8):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i8 v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i64, i16, i16) -> i16 {
block0(v0: i64, v1: i16, v2: i16):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i16 v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i64, i32, i32) -> i32 {
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i32 v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i64 v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x1, x2, eq
; csdb
; ret

function %f(i64, i128, i128) -> i128 {
block0(v0: i64, v1: i128, v2: i128):
v3 = iconst.i64 42
v4 = icmp eq v0, v3
v5 = select_spectre_guard.i128 v4, v1, v2
return v5
}

; block0:
; subs xzr, x0, #42
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; csdb
; ret

function %f(i128, i8, i8) -> i8 {
block0(v0: i128, v1: i8, v2: i8):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select_spectre_guard.i8 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; csdb
; ret

function %f(i128, i16, i16) -> i16 {
block0(v0: i128, v1: i16, v2: i16):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select_spectre_guard.i16 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; csdb
; ret

function %f(i128, i32, i32) -> i32 {
block0(v0: i128, v1: i32, v2: i32):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select_spectre_guard.i32 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; csdb
; ret

function %f(i128, i64, i64) -> i64 {
block0(v0: i128, v1: i64, v2: i64):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select_spectre_guard.i64 v5, v1, v2
return v6
}

; block0:
; movz x6, #42
; movz x8, #0
; subs xzr, x0, x6
; ccmp x1, x8, #nzcv, eq
; csel x0, x2, x3, eq
; csdb
; ret

function %f(i128, i128, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128):
v3 = iconst.i64 42
v4 = uextend.i128 v3
v5 = icmp eq v0, v4
v6 = select_spectre_guard.i128 v5, v1, v2
return v6
}

; block0:
; movz x9, #42
; movz x11, #0
; subs xzr, x0, x9
; ccmp x1, x11, #nzcv, eq
; csel x0, x2, x4, eq
; csel x1, x3, x5, eq
; csdb
; ret

function %g(i8) -> i8 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = icmp eq v0, v3
return v4
}

; block0:
; uxtb w2, w0
; subs wzr, w2, #42
; cset x0, eq
; ret

function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = bitselect.i8 v0, v1, v2
return v3
}

; block0:
; and w4, w1, w0
; bic w6, w2, w0
; orr w0, w4, w6
; ret

function %i(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}

; block0:
; ands wzr, w0, #255
; csel x0, x1, x2, ne
; ret

function %i(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
v3 = iconst.i32 42
v4 = icmp.i32 eq v0, v3
v5 = select.i8 v4, v1, v2
return v5
}

; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret

function %i128_select(i8, i128, i128) -> i128 {
block0(v0: i8, v1: i128, v2: i128):
v3 = select.i128 v0, v1, v2
return v3
}

; block0:
; ands wzr, w0, #255
; csel x0, x2, x4, ne
; csel x1, x3, x5, ne
; ret