cranelift: Remove booleans (#5031)

Remove the boolean types from Cranelift, along with the associated instructions breduce, bextend, bconst, and bint. Standardize on 1/0 for the return value of instructions that produce scalar boolean results, and -1/0 (all ones / all zeros) for boolean vector elements.
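
As an illustration of the new convention, here is a minimal CLIF sketch (the function names are hypothetical and not taken from the test suite below): scalar comparisons now return an i8 holding 1 or 0, vector comparisons return lanes of -1 or 0 in the matching integer vector type, and bmask materializes an all-ones/all-zeros mask from any value, covering most former bconst/bint/bextend/breduce uses.

function %scalar_eq(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1    ; i8: 1 if equal, 0 otherwise (no bint needed)
return v2
}

function %vector_eq(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp eq v0, v1    ; i32x4: each lane is -1 (true) or 0 (false)
return v2
}

function %flag_to_mask(i8) -> i64 {
block0(v0: i8):
v1 = bmask.i64 v0      ; i64: -1 if v0 is non-zero, 0 otherwise
return v1
}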

Fixes #3205

Co-authored-by: Afonso Bordado <afonso360@users.noreply.github.com>
Co-authored-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
Co-authored-by: Chris Fallin <chris@cfallin.org>
Trevor Elliott
2022-10-17 16:00:27 -07:00
committed by GitHub
parent 766ecb561e
commit 32a7593c94
242 changed files with 7695 additions and 10010 deletions


@@ -7,8 +7,7 @@ function u0:0(i64, i32, i32) -> i8 system_v {
block0(v0: i64, v1: i32, v2: i32):
v6 = atomic_cas.i32 v0, v1, v2
v7 = icmp eq v6, v1
v8 = bint.i8 v7
return v8
return v7
}
; stp fp, lr, [sp, #-16]!
@@ -22,8 +21,7 @@ block0(v0: i64, v1: i32, v2: i32):
; mov x28, x2
; atomic_cas_loop_32 addr=x25, expect=x26, replacement=x28, oldval=x27, scratch=x24
; subs wzr, w27, w26
; cset x8, eq
; and w0, w8, #1
; cset x0, eq
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16


@@ -304,28 +304,28 @@ block0(v0: i8):
; umov w0, v5.b[0]
; ret
function %bextend_b8() -> b32 {
function %sextend_i8() -> i32 {
block0:
v1 = bconst.b8 true
v2 = bextend.b32 v1
v1 = iconst.i8 -1
v2 = sextend.i32 v1
return v2
}
; block0:
; movz x1, #255
; movn x1, #0
; sxtb w0, w1
; ret
function %bextend_b1() -> b32 {
function %sextend_i8() -> i32 {
block0:
v1 = bconst.b1 true
v2 = bextend.b32 v1
v1 = iconst.i8 -1
v2 = sextend.i32 v1
return v2
}
; block0:
; movz x1, #1
; sbfx w0, w1, #0, #1
; movn x1, #0
; sxtb w0, w1
; ret
function %bnot_i32(i32) -> i32 {


@@ -2,7 +2,7 @@ test compile precise-output
set unwind_info=false
target aarch64
function %f0(i8x16) -> b8x16 {
function %f0(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i8 0
v2 = splat.i8x16 v1
@@ -14,7 +14,7 @@ block0(v0: i8x16):
; cmeq v0.16b, v0.16b, #0
; ret
function %f0_vconst(i8x16) -> b8x16 {
function %f0_vconst(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = vconst.i8x16 0x00
v2 = icmp eq v0, v1
@@ -25,7 +25,7 @@ block0(v0: i8x16):
; cmeq v0.16b, v0.16b, #0
; ret
function %f1(i16x8) -> b16x8 {
function %f1(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i16 0
v2 = splat.i16x8 v1
@@ -37,7 +37,7 @@ block0(v0: i16x8):
; cmeq v0.8h, v0.8h, #0
; ret
function %f1_vconst(i16x8) -> b16x8 {
function %f1_vconst(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = vconst.i16x8 0x00
v2 = icmp eq v1, v0
@@ -48,7 +48,7 @@ block0(v0: i16x8):
; cmeq v0.8h, v0.8h, #0
; ret
function %f2(i32x4) -> b32x4 {
function %f2(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 0
v2 = splat.i32x4 v1
@@ -61,7 +61,7 @@ block0(v0: i32x4):
; mvn v0.16b, v3.16b
; ret
function %f2_vconst(i32x4) -> b32x4 {
function %f2_vconst(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = vconst.i32x4 0x00
v2 = icmp ne v0, v1
@@ -73,7 +73,7 @@ block0(v0: i32x4):
; mvn v0.16b, v3.16b
; ret
function %f3(i64x2) -> b64x2 {
function %f3(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i64 0
v2 = splat.i64x2 v1
@@ -86,7 +86,7 @@ block0(v0: i64x2):
; mvn v0.16b, v3.16b
; ret
function %f3_vconst(i64x2) -> b64x2 {
function %f3_vconst(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = vconst.i64x2 0x00
v2 = icmp ne v1, v0
@@ -98,7 +98,7 @@ block0(v0: i64x2):
; mvn v0.16b, v3.16b
; ret
function %f4(i8x16) -> b8x16 {
function %f4(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i8 0
v2 = splat.i8x16 v1
@@ -110,7 +110,7 @@ block0(v0: i8x16):
; cmle v0.16b, v0.16b, #0
; ret
function %f4_vconst(i8x16) -> b8x16 {
function %f4_vconst(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = vconst.i8x16 0x00
v2 = icmp sle v0, v1
@@ -121,7 +121,7 @@ block0(v0: i8x16):
; cmle v0.16b, v0.16b, #0
; ret
function %f5(i16x8) -> b16x8 {
function %f5(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i16 0
v2 = splat.i16x8 v1
@@ -133,7 +133,7 @@ block0(v0: i16x8):
; cmge v0.8h, v0.8h, #0
; ret
function %f5_vconst(i16x8) -> b16x8 {
function %f5_vconst(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = vconst.i16x8 0x00
v2 = icmp sle v1, v0
@@ -144,7 +144,7 @@ block0(v0: i16x8):
; cmge v0.8h, v0.8h, #0
; ret
function %f6(i32x4) -> b32x4 {
function %f6(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 0
v2 = splat.i32x4 v1
@@ -156,7 +156,7 @@ block0(v0: i32x4):
; cmge v0.4s, v0.4s, #0
; ret
function %f6_vconst(i32x4) -> b32x4 {
function %f6_vconst(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = vconst.i32x4 0x00
v2 = icmp sge v0, v1
@@ -167,7 +167,7 @@ block0(v0: i32x4):
; cmge v0.4s, v0.4s, #0
; ret
function %f7(i64x2) -> b64x2 {
function %f7(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i64 0
v2 = splat.i64x2 v1
@@ -179,7 +179,7 @@ block0(v0: i64x2):
; cmle v0.2d, v0.2d, #0
; ret
function %f7_vconst(i64x2) -> b64x2 {
function %f7_vconst(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = vconst.i64x2 0x00
v2 = icmp sge v1, v0
@@ -190,7 +190,7 @@ block0(v0: i64x2):
; cmle v0.2d, v0.2d, #0
; ret
function %f8(i8x16) -> b8x16 {
function %f8(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i8 0
v2 = splat.i8x16 v1
@@ -202,7 +202,7 @@ block0(v0: i8x16):
; cmlt v0.16b, v0.16b, #0
; ret
function %f8_vconst(i8x16) -> b8x16 {
function %f8_vconst(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = vconst.i8x16 0x00
v2 = icmp slt v0, v1
@@ -213,7 +213,7 @@ block0(v0: i8x16):
; cmlt v0.16b, v0.16b, #0
; ret
function %f9(i16x8) -> b16x8 {
function %f9(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i16 0
v2 = splat.i16x8 v1
@@ -225,7 +225,7 @@ block0(v0: i16x8):
; cmgt v0.8h, v0.8h, #0
; ret
function %f9_vconst(i16x8) -> b16x8 {
function %f9_vconst(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = vconst.i16x8 0x00
v2 = icmp slt v1, v0
@@ -236,7 +236,7 @@ block0(v0: i16x8):
; cmgt v0.8h, v0.8h, #0
; ret
function %f10(i32x4) -> b32x4 {
function %f10(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 0
v2 = splat.i32x4 v1
@@ -248,7 +248,7 @@ block0(v0: i32x4):
; cmgt v0.4s, v0.4s, #0
; ret
function %f10_vconst(i32x4) -> b32x4 {
function %f10_vconst(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = vconst.i32x4 0x00
v2 = icmp sgt v0, v1
@@ -259,7 +259,7 @@ block0(v0: i32x4):
; cmgt v0.4s, v0.4s, #0
; ret
function %f11(i64x2) -> b64x2 {
function %f11(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i64 0
v2 = splat.i64x2 v1
@@ -271,7 +271,7 @@ block0(v0: i64x2):
; cmlt v0.2d, v0.2d, #0
; ret
function %f11_vconst(i64x2) -> b64x2 {
function %f11_vconst(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = vconst.i64x2 0x00
v2 = icmp sgt v1, v0
@@ -282,7 +282,7 @@ block0(v0: i64x2):
; cmlt v0.2d, v0.2d, #0
; ret
function %f12(f32x4) -> b32x4 {
function %f12(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -294,7 +294,7 @@ block0(v0: f32x4):
; fcmeq v0.4s, v0.4s, #0.0
; ret
function %f12_vconst(f32x4) -> b32x4 {
function %f12_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp eq v0, v1
@@ -305,7 +305,7 @@ block0(v0: f32x4):
; fcmeq v0.4s, v0.4s, #0.0
; ret
function %f13(f64x2) -> b64x2 {
function %f13(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -317,7 +317,7 @@ block0(v0: f64x2):
; fcmeq v0.2d, v0.2d, #0.0
; ret
function %f13_vconst(f64x2) -> b64x2 {
function %f13_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp eq v1, v0
@@ -328,7 +328,7 @@ block0(v0: f64x2):
; fcmeq v0.2d, v0.2d, #0.0
; ret
function %f14(f64x2) -> b64x2 {
function %f14(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -341,7 +341,7 @@ block0(v0: f64x2):
; mvn v0.16b, v3.16b
; ret
function %f14_vconst(f64x2) -> b64x2 {
function %f14_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp ne v0, v1
@@ -353,7 +353,7 @@ block0(v0: f64x2):
; mvn v0.16b, v3.16b
; ret
function %f15(f32x4) -> b32x4 {
function %f15(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -366,7 +366,7 @@ block0(v0: f32x4):
; mvn v0.16b, v3.16b
; ret
function %f15_vconst(f32x4) -> b32x4 {
function %f15_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp ne v1, v0
@@ -378,7 +378,7 @@ block0(v0: f32x4):
; mvn v0.16b, v3.16b
; ret
function %f16(f32x4) -> b32x4 {
function %f16(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -390,7 +390,7 @@ block0(v0: f32x4):
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f16_vconst(f32x4) -> b32x4 {
function %f16_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp le v0, v1
@@ -401,7 +401,7 @@ block0(v0: f32x4):
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f17(f64x2) -> b64x2 {
function %f17(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -413,7 +413,7 @@ block0(v0: f64x2):
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f17_vconst(f64x2) -> b64x2 {
function %f17_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp le v1, v0
@@ -424,7 +424,7 @@ block0(v0: f64x2):
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f18(f64x2) -> b64x2 {
function %f18(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -436,7 +436,7 @@ block0(v0: f64x2):
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f18_vconst(f64x2) -> b64x2 {
function %f18_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp ge v0, v1
@@ -447,7 +447,7 @@ block0(v0: f64x2):
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f19(f32x4) -> b32x4 {
function %f19(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -459,7 +459,7 @@ block0(v0: f32x4):
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f19_vconst(f32x4) -> b32x4 {
function %f19_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp ge v1, v0
@@ -470,7 +470,7 @@ block0(v0: f32x4):
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f20(f32x4) -> b32x4 {
function %f20(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -482,7 +482,7 @@ block0(v0: f32x4):
; fcmlt v0.4s, v0.4s, #0.0
; ret
function %f20_vconst(f32x4) -> b32x4 {
function %f20_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp lt v0, v1
@@ -493,7 +493,7 @@ block0(v0: f32x4):
; fcmlt v0.4s, v0.4s, #0.0
; ret
function %f21(f64x2) -> b64x2 {
function %f21(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -505,7 +505,7 @@ block0(v0: f64x2):
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f21_vconst(f64x2) -> b64x2 {
function %f21_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp lt v1, v0
@@ -516,7 +516,7 @@ block0(v0: f64x2):
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f22(f64x2) -> b64x2 {
function %f22(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = f64const 0.0
v2 = splat.f64x2 v1
@@ -528,7 +528,7 @@ block0(v0: f64x2):
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f22_vconst(f64x2) -> b64x2 {
function %f22_vconst(f64x2) -> i64x2 {
block0(v0: f64x2):
v1 = vconst.f64x2 [0.0 0.0]
v2 = fcmp gt v0, v1
@@ -539,7 +539,7 @@ block0(v0: f64x2):
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f23(f32x4) -> b32x4 {
function %f23(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = f32const 0.0
v2 = splat.f32x4 v1
@@ -551,7 +551,7 @@ block0(v0: f32x4):
; fcmlt v0.4s, v0.4s, #0.0
; ret
function %f23_vconst(f32x4) -> b32x4 {
function %f23_vconst(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = vconst.f32x4 [0.0 0.0 0.0 0.0]
v2 = fcmp gt v1, v0


@@ -2,7 +2,7 @@ test compile precise-output
set unwind_info=false
target aarch64
function %f(i64, i64) -> b1 {
function %f(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2
@@ -13,7 +13,7 @@ block0(v0: i64, v1: i64):
; cset x0, eq
; ret
function %icmp_eq_i128(i128, i128) -> b1 {
function %icmp_eq_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp eq v0, v1
return v2
@@ -25,7 +25,7 @@ block0(v0: i128, v1: i128):
; cset x0, eq
; ret
function %icmp_ne_i128(i128, i128) -> b1 {
function %icmp_ne_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ne v0, v1
return v2
@@ -37,7 +37,7 @@ block0(v0: i128, v1: i128):
; cset x0, ne
; ret
function %icmp_slt_i128(i128, i128) -> b1 {
function %icmp_slt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp slt v0, v1
return v2
@@ -51,7 +51,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_ult_i128(i128, i128) -> b1 {
function %icmp_ult_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ult v0, v1
return v2
@@ -65,7 +65,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_sle_i128(i128, i128) -> b1 {
function %icmp_sle_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sle v0, v1
return v2
@@ -79,7 +79,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_ule_i128(i128, i128) -> b1 {
function %icmp_ule_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ule v0, v1
return v2
@@ -93,7 +93,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_sgt_i128(i128, i128) -> b1 {
function %icmp_sgt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sgt v0, v1
return v2
@@ -107,7 +107,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_ugt_i128(i128, i128) -> b1 {
function %icmp_ugt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ugt v0, v1
return v2
@@ -121,7 +121,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_sge_i128(i128, i128) -> b1 {
function %icmp_sge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sge v0, v1
return v2
@@ -135,7 +135,7 @@ block0(v0: i128, v1: i128):
; csel x0, x7, x10, eq
; ret
function %icmp_uge_i128(i128, i128) -> b1 {
function %icmp_uge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp uge v0, v1
return v2
@@ -471,3 +471,4 @@ block1:
; b label3
; block3:
; ret


@@ -737,7 +737,7 @@ block0(v0: i128, v1: i128, v2: i128):
; csdb
; ret
function %g(i8) -> b1 {
function %g(i8) -> i8 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
@@ -763,15 +763,14 @@ block0(v0: i8, v1: i8, v2: i8):
; orr w0, w5, w7
; ret
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
function %i(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; block0:
; and w5, w0, #1
; subs wzr, w5, wzr
; ands wzr, w0, #255
; csel x0, x1, x2, ne
; ret
@@ -788,15 +787,14 @@ block0(v0: i32, v1: i8, v2: i8):
; csel x0, x1, x2, eq
; ret
function %i128_select(b1, i128, i128) -> i128 {
block0(v0: b1, v1: i128, v2: i128):
function %i128_select(i8, i128, i128) -> i128 {
block0(v0: i8, v1: i128, v2: i128):
v3 = select.i128 v0, v1, v2
return v3
}
; block0:
; and w8, w0, #1
; subs wzr, w8, wzr
; ands wzr, w0, #255
; csel x0, x2, x4, ne
; csel x1, x3, x5, ne
; ret


@@ -2,19 +2,19 @@ test compile precise-output
set unwind_info=false
target aarch64
function %f() -> b8 {
function %f() -> i8 {
block0:
v0 = bconst.b8 true
v0 = iconst.i8 -1
return v0
}
; block0:
; movz x0, #255
; movn x0, #0
; ret
function %f() -> b16 {
function %f() -> i16 {
block0:
v0 = bconst.b16 false
v0 = iconst.i16 0
return v0
}


@@ -0,0 +1,112 @@
test compile precise-output
target aarch64
function %bmask_i128_i128(i128) -> i128 {
block0(v0: i128):
v1 = bmask.i128 v0
return v1
}
; block0:
; orr x5, x0, x1
; subs xzr, x5, #0
; csetm x1, ne
; mov x0, x1
; ret
function %bmask_i128_i64(i128) -> i64 {
block0(v0: i128):
v1 = bmask.i64 v0
return v1
}
; block0:
; orr x4, x0, x1
; subs xzr, x4, #0
; csetm x0, ne
; ret
function %bmask_i128_i32(i128) -> i32 {
block0(v0: i128):
v1 = bmask.i32 v0
return v1
}
; block0:
; orr x4, x0, x1
; subs xzr, x4, #0
; csetm x0, ne
; ret
function %bmask_i128_i16(i128) -> i16 {
block0(v0: i128):
v1 = bmask.i16 v0
return v1
}
; block0:
; orr x4, x0, x1
; subs xzr, x4, #0
; csetm x0, ne
; ret
function %bmask_i128_i8(i128) -> i8 {
block0(v0: i128):
v1 = bmask.i8 v0
return v1
}
; block0:
; orr x4, x0, x1
; subs xzr, x4, #0
; csetm x0, ne
; ret
function %bmask_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = bmask.i128 v0
return v1
}
; block0:
; subs xzr, x0, #0
; csetm x1, ne
; mov x0, x1
; ret
function %bmask_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = bmask.i128 v0
return v1
}
; block0:
; subs xzr, x0, #0
; csetm x1, ne
; mov x0, x1
; ret
function %bmask_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = bmask.i128 v0
return v1
}
; block0:
; subs xzr, x0, #0
; csetm x1, ne
; mov x0, x1
; ret
function %bmask_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = bmask.i128 v0
return v1
}
; block0:
; subs xzr, x0, #0
; csetm x1, ne
; mov x0, x1
; ret


@@ -10,16 +10,14 @@ function u0:0() -> i8 system_v {
block0:
v0 = iconst.i16 0xddcc
v1 = icmp.i16 ne v0, v0
v2 = bint.i8 v1
return v2
return v1
}
; block0:
; movz x2, #56780
; uxth w4, w2
; movz x6, #56780
; subs wzr, w4, w6, UXTH
; cset x9, ne
; and w0, w9, #1
; movz x1, #56780
; uxth w3, w1
; movz x5, #56780
; subs wzr, w3, w5, UXTH
; cset x0, ne
; ret


@@ -10,7 +10,7 @@ block0(v0: r64):
; block0:
; ret
function %f1(r64) -> b1 {
function %f1(r64) -> i8 {
block0(v0: r64):
v1 = is_null v0
return v1
@@ -21,7 +21,7 @@ block0(v0: r64):
; cset x0, eq
; ret
function %f2(r64) -> b1 {
function %f2(r64) -> i8 {
block0(v0: r64):
v1 = is_invalid v0
return v1
@@ -43,7 +43,7 @@ block0:
; ret
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
fn0 = %f(r64) -> i8
ss0 = explicit_slot 8
block0(v0: r64, v1: r64):
@@ -74,7 +74,7 @@ block3(v7: r64, v8: r64):
; mov x2, sp
; ldr x9, [sp, #8]
; str x9, [x2]
; and w3, w0, #1
; uxtb w3, w0
; cbz x3, label1 ; b label3
; block1:
; b label2


@@ -108,8 +108,8 @@ block0:
; bsl v0.16b, v0.16b, v4.16b, v5.16b
; ret
function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 {
block0(v0: b16x8, v1: i16x8, v2: i16x8):
function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8, v2: i16x8):
v3 = vselect v0, v1, v2
return v3
}
@@ -118,8 +118,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8):
; bsl v0.16b, v0.16b, v1.16b, v2.16b
; ret
function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 {
block0(v0: b32x4, v1: f32x4, v2: f32x4):
function %vselect_f32x4(i32x4, f32x4, f32x4) -> f32x4 {
block0(v0: i32x4, v1: f32x4, v2: f32x4):
v3 = vselect v0, v1, v2
return v3
}
@@ -128,8 +128,8 @@ block0(v0: b32x4, v1: f32x4, v2: f32x4):
; bsl v0.16b, v0.16b, v1.16b, v2.16b
; ret
function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 {
block0(v0: b64x2, v1: f64x2, v2: f64x2):
function %vselect_f64x2(i64x2, f64x2, f64x2) -> f64x2 {
block0(v0: i64x2, v1: f64x2, v2: f64x2):
v3 = vselect v0, v1, v2
return v3
}


@@ -2,7 +2,7 @@ test compile precise-output
set enable_simd
target aarch64
function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ne_32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ne v0, v1
return v2
@@ -13,7 +13,7 @@ block0(v0: i32x4, v1: i32x4):
; mvn v0.16b, v4.16b
; ret
function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ugt v0, v1
return v2
@@ -23,7 +23,7 @@ block0(v0: i32x4, v1: i32x4):
; cmhi v0.4s, v0.4s, v1.4s
; ret
function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp sge v0, v1
return v2
@@ -33,7 +33,7 @@ block0(v0: i16x8, v1: i16x8):
; cmge v0.8h, v0.8h, v1.8h
; ret
function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp uge v0, v1
return v2


@@ -59,10 +59,10 @@ block0(v0: i8):
; dup v0.16b, w0
; ret
function %splat_b16() -> b16x8 {
function %splat_i16() -> i16x8 {
block0:
v0 = bconst.b16 true
v1 = splat.b16x8 v0
v0 = iconst.i16 -1
v1 = splat.i16x8 v0
return v1
}


@@ -2,8 +2,8 @@ test compile precise-output
set enable_simd
target aarch64
function %bnot_b32x4(b32x4) -> b32x4 {
block0(v0: b32x4):
function %bnot_i32x4(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = bnot v0
return v1
}
@@ -12,8 +12,8 @@ block0(v0: b32x4):
; mvn v0.16b, v0.16b
; ret
function %vany_true_b32x4(b32x4) -> b1 {
block0(v0: b32x4):
function %vany_true_i32x4(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vany_true v0
return v1
}
@@ -25,7 +25,7 @@ block0(v0: b32x4):
; cset x0, ne
; ret
function %vall_true_i64x2(i64x2) -> b1 {
function %vall_true_i64x2(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vall_true v0
return v1


@@ -2,8 +2,8 @@ test compile precise-output
set unwind_info=false
target aarch64
function %fn0(b8x8) -> b1 {
block0(v0: b8x8):
function %fn0(i8x8) -> i8 {
block0(v0: i8x8):
v1 = vall_true v0
return v1
}
@@ -15,8 +15,8 @@ block0(v0: b8x8):
; cset x0, ne
; ret
function %fn1(b8x16) -> b1 {
block0(v0: b8x16):
function %fn1(i8x16) -> i8 {
block0(v0: i8x16):
v1 = vall_true v0
return v1
}
@@ -28,8 +28,8 @@ block0(v0: b8x16):
; cset x0, ne
; ret
function %fn2(b16x4) -> b1 {
block0(v0: b16x4):
function %fn2(i16x4) -> i8 {
block0(v0: i16x4):
v1 = vall_true v0
return v1
}
@@ -41,8 +41,8 @@ block0(v0: b16x4):
; cset x0, ne
; ret
function %fn3(b16x8) -> b1 {
block0(v0: b16x8):
function %fn3(i16x8) -> i8 {
block0(v0: i16x8):
v1 = vall_true v0
return v1
}
@@ -54,8 +54,8 @@ block0(v0: b16x8):
; cset x0, ne
; ret
function %fn4(b32x2) -> b1 {
block0(v0: b32x2):
function %fn4(i32x2) -> i8 {
block0(v0: i32x2):
v1 = vall_true v0
return v1
}
@@ -67,8 +67,8 @@ block0(v0: b32x2):
; cset x0, ne
; ret
function %fn5(b32x4) -> b1 {
block0(v0: b32x4):
function %fn5(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vall_true v0
return v1
}
@@ -80,8 +80,8 @@ block0(v0: b32x4):
; cset x0, ne
; ret
function %fn6(b64x2) -> b1 {
block0(v0: b64x2):
function %fn6(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vall_true v0
return v1
}
@@ -92,3 +92,4 @@ block0(v0: b64x2):
; fcmp d5, d5
; cset x0, eq
; ret


@@ -28,18 +28,6 @@ block0:
; dup v0.8h, w2
; ret
function %f3() -> b8x16 {
block0:
v0 = bconst.b32 true
v1 = breduce.b8 v0
v2 = splat.b8x16 v1
return v2
}
; block0:
; movi v0.16b, #255
; ret
function %f4(i32, i8x16, i8x16) -> i8x16 {
block0(v0: i32, v1: i8x16, v2: i8x16):
v3 = select v0, v1, v2


@@ -123,10 +123,10 @@ block0(v0: i64):
; ldp fp, lr, [sp], #16
; ret
function %b1_spill_slot(b1) -> b1, i64 {
function %i8_spill_slot(i8) -> i8, i64 {
ss0 = explicit_slot 1000
block0(v0: b1):
block0(v0: i8):
v1 = iconst.i64 1
v2 = iconst.i64 2
v3 = iconst.i64 3


@@ -315,28 +315,6 @@ block0(v0: i8):
; mv a0,a3
; ret
function %bextend_b8() -> b32 {
block0:
v1 = bconst.b8 true
v2 = bextend.b32 v1
return v2
}
; block0:
; li a0,-1
; ret
function %bextend_b1() -> b32 {
block0:
v1 = bconst.b1 true
v2 = bextend.b32 v1
return v2
}
; block0:
; li a0,-1
; ret
function %bnot_i32(i32) -> i32 {
block0(v0: i32):
v1 = bnot v0


@@ -2,7 +2,7 @@ test compile precise-output
set unwind_info=false
target riscv64
function %f(i64, i64) -> b1 {
function %f(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2
@@ -12,7 +12,7 @@ block0(v0: i64, v1: i64):
; eq a0,a0,a1##ty=i64
; ret
function %icmp_eq_i128(i128, i128) -> b1 {
function %icmp_eq_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp eq v0, v1
return v2
@@ -22,7 +22,7 @@ block0(v0: i128, v1: i128):
; eq a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ne_i128(i128, i128) -> b1 {
function %icmp_ne_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ne v0, v1
return v2
@@ -32,7 +32,7 @@ block0(v0: i128, v1: i128):
; ne a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_slt_i128(i128, i128) -> b1 {
function %icmp_slt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp slt v0, v1
return v2
@@ -42,7 +42,7 @@ block0(v0: i128, v1: i128):
; slt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ult_i128(i128, i128) -> b1 {
function %icmp_ult_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ult v0, v1
return v2
@@ -52,7 +52,7 @@ block0(v0: i128, v1: i128):
; ult a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sle_i128(i128, i128) -> b1 {
function %icmp_sle_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sle v0, v1
return v2
@@ -62,7 +62,7 @@ block0(v0: i128, v1: i128):
; sle a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ule_i128(i128, i128) -> b1 {
function %icmp_ule_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ule v0, v1
return v2
@@ -72,7 +72,7 @@ block0(v0: i128, v1: i128):
; ule a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sgt_i128(i128, i128) -> b1 {
function %icmp_sgt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sgt v0, v1
return v2
@@ -82,7 +82,7 @@ block0(v0: i128, v1: i128):
; sgt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ugt_i128(i128, i128) -> b1 {
function %icmp_ugt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp ugt v0, v1
return v2
@@ -92,7 +92,7 @@ block0(v0: i128, v1: i128):
; ugt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sge_i128(i128, i128) -> b1 {
function %icmp_sge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp sge v0, v1
return v2
@@ -102,7 +102,7 @@ block0(v0: i128, v1: i128):
; sge a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_uge_i128(i128, i128) -> b1 {
function %icmp_uge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp uge v0, v1
return v2
@@ -209,8 +209,9 @@ block1:
}
; block0:
; eq a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; eq a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -228,8 +229,9 @@ block1:
}
; block0:
; ne a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; ne a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -247,8 +249,9 @@ block1:
}
; block0:
; slt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; slt a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -266,8 +269,9 @@ block1:
}
; block0:
; ult a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; ult a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -285,8 +289,9 @@ block1:
}
; block0:
; sle a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; sle a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -304,8 +309,9 @@ block1:
}
; block0:
; ule a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; ule a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -323,8 +329,9 @@ block1:
}
; block0:
; sgt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; sgt a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -342,8 +349,9 @@ block1:
}
; block0:
; ugt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; ugt a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -361,8 +369,9 @@ block1:
}
; block0:
; sge a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; sge a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
@@ -380,8 +389,9 @@ block1:
}
; block0:
; uge a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; uge a3,[a0,a1],[a2,a3]##ty=i128
; andi a3,a3,255
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:


@@ -18,7 +18,7 @@ block0(v0: i8, v1: i64, v2: i64):
; selectif a0,a1,a2##test=t4
; ret
function %g(i8) -> b1 {
function %g(i8) -> i8 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
@@ -48,14 +48,15 @@ block0(v0: i8, v1: i8, v2: i8):
; or a0,a2,a6
; ret
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
function %i(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; block0:
; select_i8 a0,a1,a2##condition=a0
; andi a3,a0,255
; select_i8 a0,a1,a2##condition=a3
; ret
function %i(i32, i8, i8) -> i8 {
@@ -67,20 +68,22 @@ block0(v0: i32, v1: i8, v2: i8):
}
; block0:
; li a3,42
; uext.w a5,a0
; uext.w a7,a3
; eq t4,a5,a7##ty=i32
; select_i8 a0,a1,a2##condition=t4
; li a4,42
; uext.w a6,a0
; uext.w t3,a4
; eq t0,a6,t3##ty=i32
; andi a6,t0,255
; select_i8 a0,a1,a2##condition=a6
; ret
function %i128_select(b1, i128, i128) -> i128 {
block0(v0: b1, v1: i128, v2: i128):
function %i128_select(i8, i128, i128) -> i128 {
block0(v0: i8, v1: i128, v2: i128):
v3 = select.i128 v0, v1, v2
return v3
}
; block0:
; select_i128 [a0,a1],[a1,a2],[a3,a4]##condition=a0
; andi a5,a0,255
; select_i128 [a0,a1],[a1,a2],[a3,a4]##condition=a5
; ret


@@ -2,9 +2,9 @@ test compile precise-output
set unwind_info=false
target riscv64
function %f() -> b8 {
function %f() -> i8 {
block0:
v0 = bconst.b8 true
v0 = iconst.i8 -1
return v0
}
@@ -12,9 +12,9 @@ block0:
; li a0,-1
; ret
function %f() -> b16 {
function %f() -> i16 {
block0:
v0 = bconst.b16 false
v0 = iconst.i16 0
return v0
}


@@ -13,16 +13,17 @@ block0(v0: i64, v1: i32):
}
; block0:
; uext.w t3,a1
; ld t4,0(a0)
; addi t4,t4,0
; ugt t0,t3,t4##ty=i64
; beq t0,zero,taken(label1),not_taken(label2)
; uext.w t4,a1
; ld t0,0(a0)
; addi t0,t0,0
; ugt t1,t4,t0##ty=i64
; andi t1,t1,255
; beq t1,zero,taken(label1),not_taken(label2)
; block1:
; add t0,a0,t3
; ugt t3,t3,t4##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t0##test=t3
; add t1,a0,t4
; ugt t4,t4,t0##ty=i64
; li t2,0
; selectif_spectre_guard a0,t2,t1##test=t4
; ret
; block2:
; udf##trap_code=heap_oob
@@ -37,16 +38,17 @@ block0(v0: i64, v1: i32):
}
; block0:
; uext.w t3,a1
; lui a7,16
; ugt t4,t3,a7##ty=i64
; beq t4,zero,taken(label1),not_taken(label2)
; uext.w t4,a1
; lui t3,16
; ugt t0,t4,t3##ty=i64
; andi t0,t0,255
; beq t0,zero,taken(label1),not_taken(label2)
; block1:
; add t4,a0,t3
; lui a7,16
; ugt t0,t3,a7##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t4##test=t0
; add t0,a0,t4
; lui t3,16
; ugt t1,t4,t3##ty=i64
; li t2,0
; selectif_spectre_guard a0,t2,t0##test=t1
; ret
; block2:
; udf##trap_code=heap_oob


@@ -0,0 +1,113 @@
test compile precise-output
set unwind_info=false
target riscv64
function %bmask_i128_i128(i128) -> i128 {
block0(v0: i128):
v1 = bmask.i128 v0
return v1
}
; block0:
; or a2,a0,a1
; li a4,-1
; select_reg a1,zero,a4##condition=(zero eq a2)
; mv a0,a1
; ret
function %bmask_i128_i64(i128) -> i64 {
block0(v0: i128):
v1 = bmask.i64 v0
return v1
}
; block0:
; or a1,a0,a1
; li a3,-1
; select_reg a0,zero,a3##condition=(zero eq a1)
; ret
function %bmask_i128_i32(i128) -> i32 {
block0(v0: i128):
v1 = bmask.i32 v0
return v1
}
; block0:
; or a1,a0,a1
; li a3,-1
; select_reg a0,zero,a3##condition=(zero eq a1)
; ret
function %bmask_i128_i16(i128) -> i16 {
block0(v0: i128):
v1 = bmask.i16 v0
return v1
}
; block0:
; or a1,a0,a1
; li a3,-1
; select_reg a0,zero,a3##condition=(zero eq a1)
; ret
function %bmask_i128_i8(i128) -> i8 {
block0(v0: i128):
v1 = bmask.i8 v0
return v1
}
; block0:
; or a1,a0,a1
; li a3,-1
; select_reg a0,zero,a3##condition=(zero eq a1)
; ret
function %bmask_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = bmask.i128 v0
return v1
}
; block0:
; li a1,-1
; select_reg a1,zero,a1##condition=(zero eq a0)
; mv a0,a1
; ret
function %bmask_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = bmask.i128 v0
return v1
}
; block0:
; li a1,-1
; select_reg a1,zero,a1##condition=(zero eq a0)
; mv a0,a1
; ret
function %bmask_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = bmask.i128 v0
return v1
}
; block0:
; li a1,-1
; select_reg a1,zero,a1##condition=(zero eq a0)
; mv a0,a1
; ret
function %bmask_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = bmask.i128 v0
return v1
}
; block0:
; li a1,-1
; select_reg a1,zero,a1##condition=(zero eq a0)
; mv a0,a1
; ret


@@ -7,18 +7,16 @@ function u0:0() -> i8 system_v {
block0:
v0 = iconst.i16 0xddcc
v1 = icmp.i16 ne v0, v0
v2 = bint.i8 v1
return v2
return v1
}
; block0:
; lui t2,14
; addi t2,t2,3532
; lui a2,14
; addi a2,a2,3532
; uext.h a5,t2
; uext.h a7,a2
; ne t4,a5,a7##ty=i16
; andi a0,t4,1
; lui t1,14
; addi t1,t1,3532
; lui a1,14
; addi a1,a1,3532
; uext.h a4,t1
; uext.h a6,a1
; ne a0,a4,a6##ty=i16
; ret


@@ -10,7 +10,7 @@ block0(v0: r64):
; block0:
; ret
function %f1(r64) -> b1 {
function %f1(r64) -> i8 {
block0(v0: r64):
v1 = is_null v0
return v1
@@ -20,7 +20,7 @@ block0(v0: r64):
; is_null a0,a0
; ret
function %f2(r64) -> b1 {
function %f2(r64) -> i8 {
block0(v0: r64):
v1 = is_invalid v0
return v1
@@ -41,7 +41,7 @@ block0:
; ret
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
fn0 = %f(r64) -> i8
ss0 = explicit_slot 8
block0(v0: r64, v1: r64):
@@ -65,37 +65,38 @@ block3(v7: r64, v8: r64):
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; sd s9,-8(sp)
; sd s10,-8(sp)
; add sp,-48
; block0:
; sd a0,8(nominal_sp)
; sd a1,16(nominal_sp)
; mv s9,a2
; load_sym a3,%f+0
; callind a3
; load_addr a2,nsp+0
; ld t1,8(nominal_sp)
; sd t1,0(a2)
; beq a0,zero,taken(label1),not_taken(label3)
; mv s10,a2
; load_sym a4,%f+0
; callind a4
; load_addr a3,nsp+0
; ld t2,8(nominal_sp)
; sd t2,0(a3)
; andi a4,a0,255
; beq a4,zero,taken(label1),not_taken(label3)
; block1:
; j label2
; block2:
; mv a1,t1
; mv a1,t2
; ld a0,16(nominal_sp)
; j label5
; block3:
; j label4
; block4:
; mv a0,t1
; mv a0,t2
; ld a1,16(nominal_sp)
; j label5
; block5:
; load_addr a4,nsp+0
; ld a4,0(a4)
; mv a2,s9
; sd a4,0(a2)
; load_addr a5,nsp+0
; ld a5,0(a5)
; mv a2,s10
; sd a5,0(a2)
; add sp,+48
; ld s9,-8(sp)
; ld s10,-8(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16


@@ -144,10 +144,10 @@ block0(v0: i64):
; add sp,+16
; ret
function %b1_spill_slot(b1) -> b1, i64 {
function %i8_spill_slot(i8) -> i8, i64 {
ss0 = explicit_slot 1000
block0(v0: b1):
block0(v0: i8):
v1 = iconst.i64 1
v2 = iconst.i64 2
v3 = iconst.i64 3


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %f(i64, i64) -> b1 {
function %f(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2


@@ -16,14 +16,14 @@ block0(v0: i8, v1: i64, v2: i64):
; locgre %r2, %r3
; br %r14
function %g(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
function %g(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; block0:
; llcr %r5, %r2
; lbr %r5, %r2
; chi %r5, 0
; lgr %r2, %r4
; locrlh %r2, %r3


@@ -1,19 +1,19 @@
test compile precise-output
target s390x
function %f() -> b8 {
function %f() -> i8 {
block0:
v0 = bconst.b8 true
v0 = iconst.i8 -1
return v0
}
; block0:
; lhi %r2, 255
; lhi %r2, -1
; br %r14
function %f() -> b16 {
function %f() -> i16 {
block0:
v0 = bconst.b16 false
v0 = iconst.i16 0
return v0
}

File diff suppressed because it is too large.


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %icmp_eq_i128(i128, i128) -> b1 {
function %icmp_eq_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 eq v0, v1
return v2
@@ -15,7 +15,7 @@ block0(v0: i128, v1: i128):
; lochie %r2, 1
; br %r14
function %icmp_ne_i128(i128, i128) -> b1 {
function %icmp_ne_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 ne v0, v1
return v2
@@ -29,7 +29,7 @@ block0(v0: i128, v1: i128):
; lochine %r2, 1
; br %r14
function %icmp_slt_i128(i128, i128) -> b1 {
function %icmp_slt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 slt v0, v1
return v2
@@ -43,7 +43,7 @@ block0(v0: i128, v1: i128):
; lochil %r2, 1
; br %r14
function %icmp_sgt_i128(i128, i128) -> b1 {
function %icmp_sgt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 sgt v0, v1
return v2
@@ -57,7 +57,7 @@ block0(v0: i128, v1: i128):
; lochil %r2, 1
; br %r14
function %icmp_sle_i128(i128, i128) -> b1 {
function %icmp_sle_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 sle v0, v1
return v2
@@ -71,7 +71,7 @@ block0(v0: i128, v1: i128):
; lochinl %r2, 1
; br %r14
function %icmp_sge_i128(i128, i128) -> b1 {
function %icmp_sge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 sge v0, v1
return v2
@@ -85,7 +85,7 @@ block0(v0: i128, v1: i128):
; lochinl %r2, 1
; br %r14
function %icmp_ult_i128(i128, i128) -> b1 {
function %icmp_ult_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 ult v0, v1
return v2
@@ -99,7 +99,7 @@ block0(v0: i128, v1: i128):
; lochil %r2, 1
; br %r14
function %icmp_ugt_i128(i128, i128) -> b1 {
function %icmp_ugt_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 ugt v0, v1
return v2
@@ -113,7 +113,7 @@ block0(v0: i128, v1: i128):
; lochil %r2, 1
; br %r14
function %icmp_ule_i128(i128, i128) -> b1 {
function %icmp_ule_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 ule v0, v1
return v2
@@ -127,7 +127,7 @@ block0(v0: i128, v1: i128):
; lochinl %r2, 1
; br %r14
function %icmp_uge_i128(i128, i128) -> b1 {
function %icmp_uge_i128(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp.i128 uge v0, v1
return v2


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %icmp_slt_i64(i64, i64) -> b1 {
function %icmp_slt_i64(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp.i64 slt v0, v1
return v2
@@ -13,7 +13,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_ext32(i64, i32) -> b1 {
function %icmp_slt_i64_ext32(i64, i32) -> i8 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = icmp.i64 slt v0, v2
@@ -26,7 +26,7 @@ block0(v0: i64, v1: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_imm16(i64) -> b1 {
function %icmp_slt_i64_imm16(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 1
v2 = icmp.i64 slt v0, v1
@@ -39,7 +39,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_imm32(i64) -> b1 {
function %icmp_slt_i64_imm32(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 32768
v2 = icmp.i64 slt v0, v1
@@ -52,7 +52,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_mem(i64, i64) -> b1 {
function %icmp_slt_i64_mem(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = load.i64 v1
v3 = icmp.i64 slt v0, v2
@@ -65,7 +65,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_sym(i64) -> b1 {
function %icmp_slt_i64_sym(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -80,7 +80,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_mem_ext16(i64, i64) -> b1 {
function %icmp_slt_i64_mem_ext16(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = sload16.i64 v1
v3 = icmp.i64 slt v0, v2
@@ -93,7 +93,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_sym_ext16(i64) -> b1 {
function %icmp_slt_i64_sym_ext16(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -108,7 +108,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_mem_ext32(i64, i64) -> b1 {
function %icmp_slt_i64_mem_ext32(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = sload32.i64 v1
v3 = icmp.i64 slt v0, v2
@@ -121,7 +121,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i64_sym_ext32(i64) -> b1 {
function %icmp_slt_i64_sym_ext32(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -136,7 +136,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32(i32, i32) -> b1 {
function %icmp_slt_i32(i32, i32) -> i8 {
block0(v0: i32, v1: i32):
v2 = icmp.i32 slt v0, v1
return v2
@@ -148,7 +148,7 @@ block0(v0: i32, v1: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_imm16(i32) -> b1 {
function %icmp_slt_i32_imm16(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 1
v2 = icmp.i32 slt v0, v1
@@ -161,7 +161,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_imm(i32) -> b1 {
function %icmp_slt_i32_imm(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 32768
v2 = icmp.i32 slt v0, v1
@@ -174,7 +174,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_mem(i32, i64) -> b1 {
function %icmp_slt_i32_mem(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = load.i32 v1
v3 = icmp.i32 slt v0, v2
@@ -187,7 +187,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_memoff(i32, i64) -> b1 {
function %icmp_slt_i32_memoff(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = load.i32 v1+4096
v3 = icmp.i32 slt v0, v2
@@ -200,7 +200,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_sym(i32) -> b1 {
function %icmp_slt_i32_sym(i32) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
@@ -215,7 +215,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_mem_ext16(i32, i64) -> b1 {
function %icmp_slt_i32_mem_ext16(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = sload16.i32 v1
v3 = icmp.i32 slt v0, v2
@@ -228,7 +228,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_memoff_ext16(i32, i64) -> b1 {
function %icmp_slt_i32_memoff_ext16(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = sload16.i32 v1+4096
v3 = icmp.i32 slt v0, v2
@@ -241,7 +241,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i32_sym_ext16(i32) -> b1 {
function %icmp_slt_i32_sym_ext16(i32) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
@@ -256,7 +256,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_slt_i16(i16, i16) -> b1 {
function %icmp_slt_i16(i16, i16) -> i8 {
block0(v0: i16, v1: i16):
v2 = icmp.i16 slt v0, v1
return v2
@@ -270,7 +270,7 @@ block0(v0: i16, v1: i16):
; lochil %r2, 1
; br %r14
function %icmp_slt_i16_imm(i16) -> b1 {
function %icmp_slt_i16_imm(i16) -> i8 {
block0(v0: i16):
v1 = iconst.i16 1
v2 = icmp.i16 slt v0, v1
@@ -284,7 +284,7 @@ block0(v0: i16):
; lochil %r2, 1
; br %r14
function %icmp_slt_i16_mem(i16, i64) -> b1 {
function %icmp_slt_i16_mem(i16, i64) -> i8 {
block0(v0: i16, v1: i64):
v2 = load.i16 v1
v3 = icmp.i16 slt v0, v2
@@ -298,7 +298,7 @@ block0(v0: i16, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_slt_i16_sym(i16) -> b1 {
function %icmp_slt_i16_sym(i16) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
@@ -314,7 +314,7 @@ block0(v0: i16):
; lochil %r2, 1
; br %r14
function %icmp_slt_i8(i8, i8) -> b1 {
function %icmp_slt_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = icmp.i8 slt v0, v1
return v2
@@ -328,7 +328,7 @@ block0(v0: i8, v1: i8):
; lochil %r2, 1
; br %r14
function %icmp_slt_i8_imm(i8) -> b1 {
function %icmp_slt_i8_imm(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i8 1
v2 = icmp.i8 slt v0, v1
@@ -342,7 +342,7 @@ block0(v0: i8):
; lochil %r2, 1
; br %r14
function %icmp_slt_i8_mem(i8, i64) -> b1 {
function %icmp_slt_i8_mem(i8, i64) -> i8 {
block0(v0: i8, v1: i64):
v2 = load.i8 v1
v3 = icmp.i8 slt v0, v2
@@ -357,7 +357,7 @@ block0(v0: i8, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64(i64, i64) -> b1 {
function %icmp_ult_i64(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = icmp.i64 ult v0, v1
return v2
@@ -369,7 +369,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_ext32(i64, i32) -> b1 {
function %icmp_ult_i64_ext32(i64, i32) -> i8 {
block0(v0: i64, v1: i32):
v2 = uextend.i64 v1
v3 = icmp.i64 ult v0, v2
@@ -382,7 +382,7 @@ block0(v0: i64, v1: i32):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_imm(i64) -> b1 {
function %icmp_ult_i64_imm(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 1
v2 = icmp.i64 ult v0, v1
@@ -395,7 +395,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_mem(i64, i64) -> b1 {
function %icmp_ult_i64_mem(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = load.i64 v1
v3 = icmp.i64 ult v0, v2
@@ -408,7 +408,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_sym(i64) -> b1 {
function %icmp_ult_i64_sym(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -423,7 +423,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_mem_ext32(i64, i64) -> b1 {
function %icmp_ult_i64_mem_ext32(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = uload32.i64 v1
v3 = icmp.i64 ult v0, v2
@@ -436,7 +436,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_sym_ext32(i64) -> b1 {
function %icmp_ult_i64_sym_ext32(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -451,7 +451,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_mem_ext16(i64, i64) -> b1 {
function %icmp_ult_i64_mem_ext16(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = uload16.i64 v1
v3 = icmp.i64 ult v0, v2
@@ -465,7 +465,7 @@ block0(v0: i64, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i64_sym_ext16(i64) -> b1 {
function %icmp_ult_i64_sym_ext16(i64) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
@@ -480,7 +480,7 @@ block0(v0: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32(i32, i32) -> b1 {
function %icmp_ult_i32(i32, i32) -> i8 {
block0(v0: i32, v1: i32):
v2 = icmp.i32 ult v0, v1
return v2
@@ -492,7 +492,7 @@ block0(v0: i32, v1: i32):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_imm(i32) -> b1 {
function %icmp_ult_i32_imm(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 1
v2 = icmp.i32 ult v0, v1
@@ -505,7 +505,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_mem(i32, i64) -> b1 {
function %icmp_ult_i32_mem(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = load.i32 v1
v3 = icmp.i32 ult v0, v2
@@ -518,7 +518,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_memoff(i32, i64) -> b1 {
function %icmp_ult_i32_memoff(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = load.i32 v1+4096
v3 = icmp.i32 ult v0, v2
@@ -531,7 +531,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_sym(i32) -> b1 {
function %icmp_ult_i32_sym(i32) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
@@ -546,7 +546,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_mem_ext16(i32, i64) -> b1 {
function %icmp_ult_i32_mem_ext16(i32, i64) -> i8 {
block0(v0: i32, v1: i64):
v2 = uload16.i32 v1
v3 = icmp.i32 ult v0, v2
@@ -560,7 +560,7 @@ block0(v0: i32, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i32_sym_ext16(i32) -> b1 {
function %icmp_ult_i32_sym_ext16(i32) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
@@ -575,7 +575,7 @@ block0(v0: i32):
; lochil %r2, 1
; br %r14
function %icmp_ult_i16(i16, i16) -> b1 {
function %icmp_ult_i16(i16, i16) -> i8 {
block0(v0: i16, v1: i16):
v2 = icmp.i16 ult v0, v1
return v2
@@ -589,7 +589,7 @@ block0(v0: i16, v1: i16):
; lochil %r2, 1
; br %r14
function %icmp_ult_i16_imm(i16) -> b1 {
function %icmp_ult_i16_imm(i16) -> i8 {
block0(v0: i16):
v1 = iconst.i16 1
v2 = icmp.i16 ult v0, v1
@@ -603,7 +603,7 @@ block0(v0: i16):
; lochil %r2, 1
; br %r14
function %icmp_ult_i16_mem(i16, i64) -> b1 {
function %icmp_ult_i16_mem(i16, i64) -> i8 {
block0(v0: i16, v1: i64):
v2 = load.i16 v1
v3 = icmp.i16 ult v0, v2
@@ -618,7 +618,7 @@ block0(v0: i16, v1: i64):
; lochil %r2, 1
; br %r14
function %icmp_ult_i16_mem(i16) -> b1 {
function %icmp_ult_i16_mem(i16) -> i8 {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
@@ -634,7 +634,7 @@ block0(v0: i16):
; lochil %r2, 1
; br %r14
function %icmp_ult_i8(i8, i8) -> b1 {
function %icmp_ult_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = icmp.i8 ult v0, v1
return v2
@@ -648,7 +648,7 @@ block0(v0: i8, v1: i8):
; lochil %r2, 1
; br %r14
function %icmp_ult_i8_imm(i8) -> b1 {
function %icmp_ult_i8_imm(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i8 1
v2 = icmp.i8 ult v0, v1
@@ -662,7 +662,7 @@ block0(v0: i8):
; lochil %r2, 1
; br %r14
function %icmp_ult_i8_mem(i8, i64) -> b1 {
function %icmp_ult_i8_mem(i8, i64) -> i8 {
block0(v0: i8, v1: i64):
v2 = load.i8 v1
v3 = icmp.i8 ult v0, v2


@@ -10,7 +10,7 @@ block0(v0: r64, v1: r64):
; lgr %r2, %r3
; br %r14
function %f1(r64) -> b1 {
function %f1(r64) -> i8 {
block0(v0: r64):
v1 = is_null v0
return v1
@@ -22,7 +22,7 @@ block0(v0: r64):
; lochie %r2, 1
; br %r14
function %f2(r64) -> b1 {
function %f2(r64) -> i8 {
block0(v0: r64):
v1 = is_invalid v0
return v1
@@ -45,7 +45,7 @@ block0:
; br %r14
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
fn0 = %f(r64) -> i8
ss0 = explicit_slot 8
block0(v0: r64, v1: r64):
@@ -76,7 +76,7 @@ block3(v7: r64, v8: r64):
; la %r5, 160(%r15)
; lg %r3, 168(%r15)
; stg %r3, 0(%r5)
; llcr %r2, %r2
; lbr %r2, %r2
; chi %r2, 0
; jgnlh label1 ; jg label3
; block1:


@@ -322,8 +322,8 @@ block0(v0: i8x16, v1: i8x16, v2: i8x16):
; vsel %v24, %v25, %v26, %v24
; br %r14
function %vselect_i64x2(b64x2, i64x2, i64x2) -> i64x2 {
block0(v0: b64x2, v1: i64x2, v2: i64x2):
function %vselect_i64x2(i64x2, i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2, v2: i64x2):
v3 = vselect.i64x2 v0, v1, v2
return v3
}
@@ -332,8 +332,8 @@ block0(v0: b64x2, v1: i64x2, v2: i64x2):
; vsel %v24, %v25, %v26, %v24
; br %r14
function %vselect_i32x4(b32x4, i32x4, i32x4) -> i32x4 {
block0(v0: b32x4, v1: i32x4, v2: i32x4):
function %vselect_i32x4(i32x4, i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4, v2: i32x4):
v3 = vselect.i32x4 v0, v1, v2
return v3
}
@@ -342,8 +342,8 @@ block0(v0: b32x4, v1: i32x4, v2: i32x4):
; vsel %v24, %v25, %v26, %v24
; br %r14
function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 {
block0(v0: b16x8, v1: i16x8, v2: i16x8):
function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8, v2: i16x8):
v3 = vselect.i16x8 v0, v1, v2
return v3
}
@@ -352,8 +352,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8):
; vsel %v24, %v25, %v26, %v24
; br %r14
function %vselect_i8x16(b8x16, i8x16, i8x16) -> i8x16 {
block0(v0: b8x16, v1: i8x16, v2: i8x16):
function %vselect_i8x16(i8x16, i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16, v2: i8x16):
v3 = vselect.i8x16 v0, v1, v2
return v3
}


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %fcmp_eq_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_eq_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 eq v0, v1
return v2
@@ -11,7 +11,7 @@ block0(v0: f64x2, v1: f64x2):
; vfcedb %v24, %v24, %v25
; br %r14
function %fcmp_ne_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ne_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ne v0, v1
return v2
@@ -22,7 +22,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_gt_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_gt_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 gt v0, v1
return v2
@@ -32,7 +32,7 @@ block0(v0: f64x2, v1: f64x2):
; vfchdb %v24, %v24, %v25
; br %r14
function %fcmp_lt_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_lt_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 lt v0, v1
return v2
@@ -42,7 +42,7 @@ block0(v0: f64x2, v1: f64x2):
; vfchdb %v24, %v25, %v24
; br %r14
function %fcmp_ge_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ge_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ge v0, v1
return v2
@@ -52,7 +52,7 @@ block0(v0: f64x2, v1: f64x2):
; vfchedb %v24, %v24, %v25
; br %r14
function %fcmp_le_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_le_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 le v0, v1
return v2
@@ -62,7 +62,7 @@ block0(v0: f64x2, v1: f64x2):
; vfchedb %v24, %v25, %v24
; br %r14
function %fcmp_ueq_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ueq_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ueq v0, v1
return v2
@@ -74,7 +74,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v6
; br %r14
function %fcmp_one_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_one_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 one v0, v1
return v2
@@ -86,7 +86,7 @@ block0(v0: f64x2, v1: f64x2):
; vo %v24, %v4, %v6
; br %r14
function %fcmp_ugt_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ugt_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ugt v0, v1
return v2
@@ -97,7 +97,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ult_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ult_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ult v0, v1
return v2
@@ -108,7 +108,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_uge_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_uge_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 uge v0, v1
return v2
@@ -119,7 +119,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ule_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ule_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ule v0, v1
return v2
@@ -130,7 +130,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ord_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_ord_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 ord v0, v1
return v2
@@ -142,7 +142,7 @@ block0(v0: f64x2, v1: f64x2):
; vo %v24, %v4, %v6
; br %r14
function %fcmp_uno_f64x2(f64x2, f64x2) -> b64x2 {
function %fcmp_uno_f64x2(f64x2, f64x2) -> i64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp.f64x2 uno v0, v1
return v2
@@ -154,7 +154,7 @@ block0(v0: f64x2, v1: f64x2):
; vno %v24, %v4, %v6
; br %r14
function %fcmp_eq_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_eq_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 eq v0, v1
return v2
@@ -164,7 +164,7 @@ block0(v0: f32x4, v1: f32x4):
; vfcesb %v24, %v24, %v25
; br %r14
function %fcmp_ne_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ne_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ne v0, v1
return v2
@@ -175,7 +175,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_gt_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_gt_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 gt v0, v1
return v2
@@ -185,7 +185,7 @@ block0(v0: f32x4, v1: f32x4):
; vfchsb %v24, %v24, %v25
; br %r14
function %fcmp_lt_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_lt_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 lt v0, v1
return v2
@@ -195,7 +195,7 @@ block0(v0: f32x4, v1: f32x4):
; vfchsb %v24, %v25, %v24
; br %r14
function %fcmp_ge_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ge_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ge v0, v1
return v2
@@ -205,7 +205,7 @@ block0(v0: f32x4, v1: f32x4):
; vfchesb %v24, %v24, %v25
; br %r14
function %fcmp_le_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_le_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 le v0, v1
return v2
@@ -215,7 +215,7 @@ block0(v0: f32x4, v1: f32x4):
; vfchesb %v24, %v25, %v24
; br %r14
function %fcmp_ueq_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ueq_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ueq v0, v1
return v2
@@ -227,7 +227,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v6
; br %r14
function %fcmp_one_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_one_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 one v0, v1
return v2
@@ -239,7 +239,7 @@ block0(v0: f32x4, v1: f32x4):
; vo %v24, %v4, %v6
; br %r14
function %fcmp_ugt_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ugt_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ugt v0, v1
return v2
@@ -250,7 +250,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ult_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ult_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ult v0, v1
return v2
@@ -261,7 +261,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_uge_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_uge_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 uge v0, v1
return v2
@@ -272,7 +272,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ule_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ule_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ule v0, v1
return v2
@@ -283,7 +283,7 @@ block0(v0: f32x4, v1: f32x4):
; vno %v24, %v4, %v4
; br %r14
function %fcmp_ord_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_ord_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 ord v0, v1
return v2
@@ -295,7 +295,7 @@ block0(v0: f32x4, v1: f32x4):
; vo %v24, %v4, %v6
; br %r14
function %fcmp_uno_f32x4(f32x4, f32x4) -> b32x4 {
function %fcmp_uno_f32x4(f32x4, f32x4) -> i32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = fcmp.f32x4 uno v0, v1
return v2


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %icmp_eq_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_eq_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 eq v0, v1
return v2
@@ -11,7 +11,7 @@ block0(v0: i64x2, v1: i64x2):
; vceqg %v24, %v24, %v25
; br %r14
function %icmp_ne_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_ne_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 ne v0, v1
return v2
@@ -22,7 +22,7 @@ block0(v0: i64x2, v1: i64x2):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sgt_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_sgt_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 sgt v0, v1
return v2
@@ -32,7 +32,7 @@ block0(v0: i64x2, v1: i64x2):
; vchg %v24, %v24, %v25
; br %r14
function %icmp_slt_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_slt_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 slt v0, v1
return v2
@@ -42,7 +42,7 @@ block0(v0: i64x2, v1: i64x2):
; vchg %v24, %v25, %v24
; br %r14
function %icmp_sge_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_sge_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 sge v0, v1
return v2
@@ -53,7 +53,7 @@ block0(v0: i64x2, v1: i64x2):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sle_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_sle_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 sle v0, v1
return v2
@@ -64,7 +64,7 @@ block0(v0: i64x2, v1: i64x2):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ugt_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_ugt_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 ugt v0, v1
return v2
@@ -74,7 +74,7 @@ block0(v0: i64x2, v1: i64x2):
; vchlg %v24, %v24, %v25
; br %r14
function %icmp_ult_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_ult_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 ult v0, v1
return v2
@@ -84,7 +84,7 @@ block0(v0: i64x2, v1: i64x2):
; vchlg %v24, %v25, %v24
; br %r14
function %icmp_uge_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_uge_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 uge v0, v1
return v2
@@ -95,7 +95,7 @@ block0(v0: i64x2, v1: i64x2):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ule_i64x2(i64x2, i64x2) -> b64x2 {
function %icmp_ule_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp.i64x2 ule v0, v1
return v2
@@ -106,7 +106,7 @@ block0(v0: i64x2, v1: i64x2):
; vno %v24, %v4, %v4
; br %r14
function %icmp_eq_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_eq_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 eq v0, v1
return v2
@@ -116,7 +116,7 @@ block0(v0: i32x4, v1: i32x4):
; vceqf %v24, %v24, %v25
; br %r14
function %icmp_ne_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ne_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 ne v0, v1
return v2
@@ -127,7 +127,7 @@ block0(v0: i32x4, v1: i32x4):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sgt_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_sgt_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 sgt v0, v1
return v2
@@ -137,7 +137,7 @@ block0(v0: i32x4, v1: i32x4):
; vchf %v24, %v24, %v25
; br %r14
function %icmp_slt_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_slt_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 slt v0, v1
return v2
@@ -147,7 +147,7 @@ block0(v0: i32x4, v1: i32x4):
; vchf %v24, %v25, %v24
; br %r14
function %icmp_sge_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_sge_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 sge v0, v1
return v2
@@ -158,7 +158,7 @@ block0(v0: i32x4, v1: i32x4):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sle_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_sle_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 sle v0, v1
return v2
@@ -169,7 +169,7 @@ block0(v0: i32x4, v1: i32x4):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 ugt v0, v1
return v2
@@ -179,7 +179,7 @@ block0(v0: i32x4, v1: i32x4):
; vchlf %v24, %v24, %v25
; br %r14
function %icmp_ult_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ult_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 ult v0, v1
return v2
@@ -189,7 +189,7 @@ block0(v0: i32x4, v1: i32x4):
; vchlf %v24, %v25, %v24
; br %r14
function %icmp_uge_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_uge_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 uge v0, v1
return v2
@@ -200,7 +200,7 @@ block0(v0: i32x4, v1: i32x4):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ule_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ule_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp.i32x4 ule v0, v1
return v2
@@ -211,7 +211,7 @@ block0(v0: i32x4, v1: i32x4):
; vno %v24, %v4, %v4
; br %r14
function %icmp_eq_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_eq_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 eq v0, v1
return v2
@@ -221,7 +221,7 @@ block0(v0: i16x8, v1: i16x8):
; vceqh %v24, %v24, %v25
; br %r14
function %icmp_ne_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_ne_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 ne v0, v1
return v2
@@ -232,7 +232,7 @@ block0(v0: i16x8, v1: i16x8):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sgt_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_sgt_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 sgt v0, v1
return v2
@@ -242,7 +242,7 @@ block0(v0: i16x8, v1: i16x8):
; vchh %v24, %v24, %v25
; br %r14
function %icmp_slt_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_slt_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 slt v0, v1
return v2
@@ -252,7 +252,7 @@ block0(v0: i16x8, v1: i16x8):
; vchh %v24, %v25, %v24
; br %r14
function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 sge v0, v1
return v2
@@ -263,7 +263,7 @@ block0(v0: i16x8, v1: i16x8):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sle_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_sle_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 sle v0, v1
return v2
@@ -274,7 +274,7 @@ block0(v0: i16x8, v1: i16x8):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ugt_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_ugt_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 ugt v0, v1
return v2
@@ -284,7 +284,7 @@ block0(v0: i16x8, v1: i16x8):
; vchlh %v24, %v24, %v25
; br %r14
function %icmp_ult_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_ult_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 ult v0, v1
return v2
@@ -294,7 +294,7 @@ block0(v0: i16x8, v1: i16x8):
; vchlh %v24, %v25, %v24
; br %r14
function %icmp_uge_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_uge_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 uge v0, v1
return v2
@@ -305,7 +305,7 @@ block0(v0: i16x8, v1: i16x8):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ule_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_ule_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp.i16x8 ule v0, v1
return v2
@@ -316,7 +316,7 @@ block0(v0: i16x8, v1: i16x8):
; vno %v24, %v4, %v4
; br %r14
function %icmp_eq_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_eq_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 eq v0, v1
return v2
@@ -326,7 +326,7 @@ block0(v0: i8x16, v1: i8x16):
; vceqb %v24, %v24, %v25
; br %r14
function %icmp_ne_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_ne_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 ne v0, v1
return v2
@@ -337,7 +337,7 @@ block0(v0: i8x16, v1: i8x16):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sgt_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_sgt_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 sgt v0, v1
return v2
@@ -347,7 +347,7 @@ block0(v0: i8x16, v1: i8x16):
; vchb %v24, %v24, %v25
; br %r14
function %icmp_slt_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_slt_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 slt v0, v1
return v2
@@ -357,7 +357,7 @@ block0(v0: i8x16, v1: i8x16):
; vchb %v24, %v25, %v24
; br %r14
function %icmp_sge_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_sge_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 sge v0, v1
return v2
@@ -368,7 +368,7 @@ block0(v0: i8x16, v1: i8x16):
; vno %v24, %v4, %v4
; br %r14
function %icmp_sle_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_sle_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 sle v0, v1
return v2
@@ -379,7 +379,7 @@ block0(v0: i8x16, v1: i8x16):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ugt_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_ugt_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 ugt v0, v1
return v2
@@ -389,7 +389,7 @@ block0(v0: i8x16, v1: i8x16):
; vchlb %v24, %v24, %v25
; br %r14
function %icmp_ult_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_ult_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 ult v0, v1
return v2
@@ -399,7 +399,7 @@ block0(v0: i8x16, v1: i8x16):
; vchlb %v24, %v25, %v24
; br %r14
function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 uge v0, v1
return v2
@@ -410,7 +410,7 @@ block0(v0: i8x16, v1: i8x16):
; vno %v24, %v4, %v4
; br %r14
function %icmp_ule_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_ule_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp.i8x16 ule v0, v1
return v2


@@ -1,7 +1,7 @@
test compile precise-output
target s390x
function %vany_true_i64x2(i64x2) -> b1 {
function %vany_true_i64x2(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vany_true v0
return v1
@@ -14,7 +14,7 @@ block0(v0: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_i32x4(i32x4) -> b1 {
function %vany_true_i32x4(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vany_true v0
return v1
@@ -27,7 +27,7 @@ block0(v0: i32x4):
; lochine %r2, 1
; br %r14
function %vany_true_i16x8(i16x8) -> b1 {
function %vany_true_i16x8(i16x8) -> i8 {
block0(v0: i16x8):
v1 = vany_true v0
return v1
@@ -40,7 +40,7 @@ block0(v0: i16x8):
; lochine %r2, 1
; br %r14
function %vany_true_i8x16(i8x16) -> b1 {
function %vany_true_i8x16(i8x16) -> i8 {
block0(v0: i8x16):
v1 = vany_true v0
return v1
@@ -53,7 +53,7 @@ block0(v0: i8x16):
; lochine %r2, 1
; br %r14
function %vall_true_i64x2(i64x2) -> b1 {
function %vall_true_i64x2(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vall_true v0
return v1
@@ -66,7 +66,7 @@ block0(v0: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_i32x4(i32x4) -> b1 {
function %vall_true_i32x4(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vall_true v0
return v1
@@ -79,7 +79,7 @@ block0(v0: i32x4):
; lochio %r2, 1
; br %r14
function %vall_true_i16x8(i16x8) -> b1 {
function %vall_true_i16x8(i16x8) -> i8 {
block0(v0: i16x8):
v1 = vall_true v0
return v1
@@ -92,7 +92,7 @@ block0(v0: i16x8):
; lochio %r2, 1
; br %r14
function %vall_true_i8x16(i8x16) -> b1 {
function %vall_true_i8x16(i8x16) -> i8 {
block0(v0: i8x16):
v1 = vall_true v0
return v1
@@ -105,7 +105,7 @@ block0(v0: i8x16):
; lochio %r2, 1
; br %r14
function %vany_true_icmp_eq_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_eq_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp eq v0, v1
v3 = vany_true v2
@@ -118,7 +118,7 @@ block0(v0: i64x2, v1: i64x2):
; lochino %r2, 1
; br %r14
function %vany_true_icmp_ne_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_ne_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ne v0, v1
v3 = vany_true v2
@@ -131,7 +131,7 @@ block0(v0: i64x2, v1: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_icmp_sgt_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_sgt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sgt v0, v1
v3 = vany_true v2
@@ -144,7 +144,7 @@ block0(v0: i64x2, v1: i64x2):
; lochino %r2, 1
; br %r14
function %vany_true_icmp_sle_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_sle_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sle v0, v1
v3 = vany_true v2
@@ -157,7 +157,7 @@ block0(v0: i64x2, v1: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_icmp_slt_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_slt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp slt v0, v1
v3 = vany_true v2
@@ -170,7 +170,7 @@ block0(v0: i64x2, v1: i64x2):
; lochino %r2, 1
; br %r14
function %vany_true_icmp_sge_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_sge_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sge v0, v1
v3 = vany_true v2
@@ -183,7 +183,7 @@ block0(v0: i64x2, v1: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_icmp_ugt_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_ugt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ugt v0, v1
v3 = vany_true v2
@@ -196,7 +196,7 @@ block0(v0: i64x2, v1: i64x2):
; lochino %r2, 1
; br %r14
function %vany_true_icmp_ule_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_ule_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ule v0, v1
v3 = vany_true v2
@@ -209,7 +209,7 @@ block0(v0: i64x2, v1: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_icmp_ult_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_ult_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ult v0, v1
v3 = vany_true v2
@@ -222,7 +222,7 @@ block0(v0: i64x2, v1: i64x2):
; lochino %r2, 1
; br %r14
function %vany_true_icmp_uge_i64x2(i64x2, i64x2) -> b1 {
function %vany_true_icmp_uge_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp uge v0, v1
v3 = vany_true v2
@@ -235,7 +235,7 @@ block0(v0: i64x2, v1: i64x2):
; lochine %r2, 1
; br %r14
function %vany_true_fcmp_eq_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_eq_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp eq v0, v1
v3 = vany_true v2
@@ -248,7 +248,7 @@ block0(v0: f64x2, v1: f64x2):
; lochino %r2, 1
; br %r14
function %vany_true_fcmp_ne_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_ne_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ne v0, v1
v3 = vany_true v2
@@ -261,7 +261,7 @@ block0(v0: f64x2, v1: f64x2):
; lochine %r2, 1
; br %r14
function %vany_true_fcmp_gt_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_gt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp gt v0, v1
v3 = vany_true v2
@@ -274,7 +274,7 @@ block0(v0: f64x2, v1: f64x2):
; lochino %r2, 1
; br %r14
function %vany_true_fcmp_ule_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_ule_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ule v0, v1
v3 = vany_true v2
@@ -287,7 +287,7 @@ block0(v0: f64x2, v1: f64x2):
; lochine %r2, 1
; br %r14
function %vany_true_fcmp_ge_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_ge_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ge v0, v1
v3 = vany_true v2
@@ -300,7 +300,7 @@ block0(v0: f64x2, v1: f64x2):
; lochino %r2, 1
; br %r14
function %vany_true_fcmp_ult_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_ult_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ult v0, v1
v3 = vany_true v2
@@ -313,7 +313,7 @@ block0(v0: f64x2, v1: f64x2):
; lochine %r2, 1
; br %r14
function %vany_true_fcmp_lt_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_lt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp lt v0, v1
v3 = vany_true v2
@@ -326,7 +326,7 @@ block0(v0: f64x2, v1: f64x2):
; lochino %r2, 1
; br %r14
function %vany_true_fcmp_uge_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_uge_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp uge v0, v1
v3 = vany_true v2
@@ -339,7 +339,7 @@ block0(v0: f64x2, v1: f64x2):
; lochine %r2, 1
; br %r14
function %vany_true_fcmp_le_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_le_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp le v0, v1
v3 = vany_true v2
@@ -352,7 +352,7 @@ block0(v0: f64x2, v1: f64x2):
; lochino %r2, 1
; br %r14
function %vany_true_fcmp_ugt_f64x2(f64x2, f64x2) -> b1 {
function %vany_true_fcmp_ugt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ugt v0, v1
v3 = vany_true v2
@@ -365,7 +365,7 @@ block0(v0: f64x2, v1: f64x2):
; lochine %r2, 1
; br %r14
function %vall_true_icmp_eq_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_eq_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp eq v0, v1
v3 = vall_true v2
@@ -378,7 +378,7 @@ block0(v0: i64x2, v1: i64x2):
; lochie %r2, 1
; br %r14
function %vall_true_icmp_ne_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_ne_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ne v0, v1
v3 = vall_true v2
@@ -391,7 +391,7 @@ block0(v0: i64x2, v1: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_icmp_sgt_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_sgt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sgt v0, v1
v3 = vall_true v2
@@ -404,7 +404,7 @@ block0(v0: i64x2, v1: i64x2):
; lochie %r2, 1
; br %r14
function %vall_true_icmp_sle_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_sle_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sle v0, v1
v3 = vall_true v2
@@ -417,7 +417,7 @@ block0(v0: i64x2, v1: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_icmp_slt_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_slt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp slt v0, v1
v3 = vall_true v2
@@ -430,7 +430,7 @@ block0(v0: i64x2, v1: i64x2):
; lochie %r2, 1
; br %r14
function %vall_true_icmp_sge_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_sge_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp sge v0, v1
v3 = vall_true v2
@@ -443,7 +443,7 @@ block0(v0: i64x2, v1: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_icmp_ugt_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_ugt_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ugt v0, v1
v3 = vall_true v2
@@ -456,7 +456,7 @@ block0(v0: i64x2, v1: i64x2):
; lochie %r2, 1
; br %r14
function %vall_true_icmp_ule_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_ule_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ule v0, v1
v3 = vall_true v2
@@ -469,7 +469,7 @@ block0(v0: i64x2, v1: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_icmp_ult_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_ult_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp ult v0, v1
v3 = vall_true v2
@@ -482,7 +482,7 @@ block0(v0: i64x2, v1: i64x2):
; lochie %r2, 1
; br %r14
function %vall_true_icmp_uge_i64x2(i64x2, i64x2) -> b1 {
function %vall_true_icmp_uge_i64x2(i64x2, i64x2) -> i8 {
block0(v0: i64x2, v1: i64x2):
v2 = icmp uge v0, v1
v3 = vall_true v2
@@ -495,7 +495,7 @@ block0(v0: i64x2, v1: i64x2):
; lochio %r2, 1
; br %r14
function %vall_true_fcmp_eq_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_eq_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp eq v0, v1
v3 = vall_true v2
@@ -508,7 +508,7 @@ block0(v0: f64x2, v1: f64x2):
; lochie %r2, 1
; br %r14
function %vall_true_fcmp_ne_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_ne_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ne v0, v1
v3 = vall_true v2
@@ -521,7 +521,7 @@ block0(v0: f64x2, v1: f64x2):
; lochio %r2, 1
; br %r14
function %vall_true_fcmp_gt_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_gt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp gt v0, v1
v3 = vall_true v2
@@ -534,7 +534,7 @@ block0(v0: f64x2, v1: f64x2):
; lochie %r2, 1
; br %r14
function %vall_true_fcmp_ule_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_ule_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ule v0, v1
v3 = vall_true v2
@@ -547,7 +547,7 @@ block0(v0: f64x2, v1: f64x2):
; lochio %r2, 1
; br %r14
function %vall_true_fcmp_ge_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_ge_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ge v0, v1
v3 = vall_true v2
@@ -560,7 +560,7 @@ block0(v0: f64x2, v1: f64x2):
; lochie %r2, 1
; br %r14
function %vall_true_fcmp_ult_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_ult_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ult v0, v1
v3 = vall_true v2
@@ -573,7 +573,7 @@ block0(v0: f64x2, v1: f64x2):
; lochio %r2, 1
; br %r14
function %vall_true_fcmp_lt_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_lt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp lt v0, v1
v3 = vall_true v2
@@ -586,7 +586,7 @@ block0(v0: f64x2, v1: f64x2):
; lochie %r2, 1
; br %r14
function %vall_true_fcmp_uge_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_uge_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp uge v0, v1
v3 = vall_true v2
@@ -599,7 +599,7 @@ block0(v0: f64x2, v1: f64x2):
; lochio %r2, 1
; br %r14
function %vall_true_fcmp_le_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_le_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp le v0, v1
v3 = vall_true v2
@@ -612,7 +612,7 @@ block0(v0: f64x2, v1: f64x2):
; lochie %r2, 1
; br %r14
function %vall_true_fcmp_ugt_f64x2(f64x2, f64x2) -> b1 {
function %vall_true_fcmp_ugt_f64x2(f64x2, f64x2) -> i8 {
block0(v0: f64x2, v1: f64x2):
v2 = fcmp ugt v0, v1
v3 = vall_true v2


@@ -17,7 +17,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_0(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
v2 = shuffle v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
return v2
}
@@ -28,7 +28,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_1(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5]
v2 = shuffle v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5]
return v2
}
@@ -39,7 +39,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_2(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47]
v2 = shuffle v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47]
return v2
}
@@ -52,7 +52,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15]
v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15]
return v2
}
@@ -62,7 +62,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15]
v2 = shuffle v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15]
return v2
}
@@ -72,7 +72,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15]
v2 = shuffle v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15]
return v2
}
@@ -82,7 +82,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15]
v2 = shuffle v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15]
return v2
}
@@ -92,7 +92,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31]
v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31]
return v2
}
@@ -102,7 +102,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31]
v2 = shuffle v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31]
return v2
}
@@ -112,7 +112,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31]
v2 = shuffle v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31]
return v2
}
@@ -122,7 +122,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31]
v2 = shuffle v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31]
return v2
}
@@ -132,7 +132,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15]
v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15]
return v2
}
@@ -142,7 +142,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15]
v2 = shuffle v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15]
return v2
}
@@ -152,7 +152,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15]
v2 = shuffle v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15]
return v2
}
@@ -162,7 +162,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15]
v2 = shuffle v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15]
return v2
}
@@ -172,7 +172,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31]
v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31]
return v2
}
@@ -182,7 +182,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31]
v2 = shuffle v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31]
return v2
}
@@ -192,7 +192,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31]
v2 = shuffle v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31]
return v2
}
@@ -202,7 +202,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31]
v2 = shuffle v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31]
return v2
}
@@ -212,7 +212,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7]
v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7]
return v2
}
@@ -222,7 +222,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7]
v2 = shuffle v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7]
return v2
}
@@ -232,7 +232,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7]
v2 = shuffle v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7]
return v2
}
@@ -242,7 +242,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7]
v2 = shuffle v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7]
return v2
}
@@ -252,7 +252,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23]
v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23]
return v2
}
@@ -262,7 +262,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23]
v2 = shuffle v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23]
return v2
}
@@ -272,7 +272,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23]
v2 = shuffle v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23]
return v2
}
@@ -282,7 +282,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23]
v2 = shuffle v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23]
return v2
}
@@ -292,7 +292,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7]
v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7]
return v2
}
@@ -302,7 +302,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7]
v2 = shuffle v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7]
return v2
}
@@ -312,7 +312,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7]
v2 = shuffle v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7]
return v2
}
@@ -322,7 +322,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7]
v2 = shuffle v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7]
return v2
}
@@ -332,7 +332,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23]
v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23]
return v2
}
@@ -342,7 +342,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23]
v2 = shuffle v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23]
return v2
}
@@ -352,7 +352,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23]
v2 = shuffle v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23]
return v2
}
@@ -362,7 +362,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23]
v2 = shuffle v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23]
return v2
}
@@ -373,7 +373,7 @@ block0(v0: i8x16, v1: i8x16):
;; Special patterns that can be implemented via PACK.
function %shuffle_vpkg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 24 25 26 27 0 1 2 3 8 9 10 11]
v2 = shuffle v0, v1, [16 17 18 19 24 25 26 27 0 1 2 3 8 9 10 11]
return v2
}
@@ -383,7 +383,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 20 21 24 25 28 29 0 1 4 5 8 9 12 13]
v2 = shuffle v0, v1, [16 17 20 21 24 25 28 29 0 1 4 5 8 9 12 13]
return v2
}
@@ -393,7 +393,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 18 20 22 24 26 28 30 0 2 4 6 8 10 12 14]
v2 = shuffle v0, v1, [16 18 20 22 24 26 28 30 0 2 4 6 8 10 12 14]
return v2
}
@@ -403,7 +403,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27]
v2 = shuffle v0, v1, [0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27]
return v2
}
@@ -413,7 +413,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29]
v2 = shuffle v0, v1, [0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29]
return v2
}
@@ -423,7 +423,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30]
v2 = shuffle v0, v1, [0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30]
return v2
}
@@ -433,7 +433,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 8 9 10 11 0 1 2 3 8 9 10 11]
v2 = shuffle v0, v1, [0 1 2 3 8 9 10 11 0 1 2 3 8 9 10 11]
return v2
}
@@ -443,7 +443,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 4 5 8 9 12 13 0 1 4 5 8 9 12 13]
v2 = shuffle v0, v1, [0 1 4 5 8 9 12 13 0 1 4 5 8 9 12 13]
return v2
}
@@ -453,7 +453,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 2 4 6 8 10 12 14 0 2 4 6 8 10 12 14]
v2 = shuffle v0, v1, [0 2 4 6 8 10 12 14 0 2 4 6 8 10 12 14]
return v2
}
@@ -463,7 +463,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 24 25 26 27 16 17 18 19 24 25 26 27]
v2 = shuffle v0, v1, [16 17 18 19 24 25 26 27 16 17 18 19 24 25 26 27]
return v2
}
@@ -473,7 +473,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 20 21 24 25 28 29 16 17 20 21 24 25 28 29]
v2 = shuffle v0, v1, [16 17 20 21 24 25 28 29 16 17 20 21 24 25 28 29]
return v2
}
@@ -483,7 +483,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 18 20 22 24 26 28 30 16 18 20 22 24 26 28 30]
v2 = shuffle v0, v1, [16 18 20 22 24 26 28 30 16 18 20 22 24 26 28 30]
return v2
}


@@ -16,7 +16,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_0(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
v2 = shuffle v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
return v2
}
@@ -27,7 +27,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_1(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5]
v2 = shuffle v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5]
return v2
}
@@ -38,7 +38,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_2(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47]
v2 = shuffle v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47]
return v2
}
@@ -51,7 +51,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23]
v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23]
return v2
}
@@ -61,7 +61,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23]
v2 = shuffle v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23]
return v2
}
@@ -71,7 +71,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23]
v2 = shuffle v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23]
return v2
}
@@ -81,7 +81,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23]
v2 = shuffle v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23]
return v2
}
@@ -91,7 +91,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7]
v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7]
return v2
}
@@ -101,7 +101,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7]
v2 = shuffle v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7]
return v2
}
@@ -111,7 +111,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7]
v2 = shuffle v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7]
return v2
}
@@ -121,7 +121,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7]
v2 = shuffle v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7]
return v2
}
@@ -131,7 +131,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7]
v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7]
return v2
}
@@ -141,7 +141,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7]
v2 = shuffle v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7]
return v2
}
@@ -151,7 +151,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7]
v2 = shuffle v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7]
return v2
}
@@ -161,7 +161,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7]
v2 = shuffle v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7]
return v2
}
@@ -171,7 +171,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhg_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23]
v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23]
return v2
}
@@ -181,7 +181,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhf_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23]
v2 = shuffle v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23]
return v2
}
@@ -191,7 +191,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhh_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23]
v2 = shuffle v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23]
return v2
}
@@ -201,7 +201,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrhb_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23]
v2 = shuffle v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23]
return v2
}
@@ -211,7 +211,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31]
v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31]
return v2
}
@@ -221,7 +221,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31]
v2 = shuffle v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31]
return v2
}
@@ -231,7 +231,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31]
v2 = shuffle v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31]
return v2
}
@@ -241,7 +241,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31]
v2 = shuffle v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31]
return v2
}
@@ -251,7 +251,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15]
v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15]
return v2
}
@@ -261,7 +261,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15]
v2 = shuffle v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15]
return v2
}
@@ -271,7 +271,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15]
v2 = shuffle v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15]
return v2
}
@@ -281,7 +281,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15]
v2 = shuffle v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15]
return v2
}
@@ -291,7 +291,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15]
v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15]
return v2
}
@@ -301,7 +301,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15]
v2 = shuffle v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15]
return v2
}
@@ -311,7 +311,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15]
v2 = shuffle v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15]
return v2
}
@@ -321,7 +321,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15]
v2 = shuffle v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15]
return v2
}
@@ -331,7 +331,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlg_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31]
v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31]
return v2
}
@@ -341,7 +341,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlf_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31]
v2 = shuffle v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31]
return v2
}
@@ -351,7 +351,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlh_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31]
v2 = shuffle v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31]
return v2
}
@@ -361,7 +361,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vmrlb_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31]
v2 = shuffle v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31]
return v2
}
@@ -372,7 +372,7 @@ block0(v0: i8x16, v1: i8x16):
;; Special patterns that can be implemented via PACK.
function %shuffle_vpkg_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31]
v2 = shuffle v0, v1, [4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31]
return v2
}
@@ -382,7 +382,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [2 3 6 7 10 11 14 15 18 19 22 23 26 27 30 31]
v2 = shuffle v0, v1, [2 3 6 7 10 11 14 15 18 19 22 23 26 27 30 31]
return v2
}
@@ -392,7 +392,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_xy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31]
v2 = shuffle v0, v1, [1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31]
return v2
}
@@ -402,7 +402,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [20 21 22 23 28 29 30 31 4 5 6 7 12 13 14 15]
v2 = shuffle v0, v1, [20 21 22 23 28 29 30 31 4 5 6 7 12 13 14 15]
return v2
}
@@ -412,7 +412,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [18 19 22 23 26 27 30 31 2 3 6 7 10 11 14 15]
v2 = shuffle v0, v1, [18 19 22 23 26 27 30 31 2 3 6 7 10 11 14 15]
return v2
}
@@ -422,7 +422,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_yx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [17 19 21 23 25 27 29 31 1 3 5 7 9 11 13 15]
v2 = shuffle v0, v1, [17 19 21 23 25 27 29 31 1 3 5 7 9 11 13 15]
return v2
}
@@ -432,7 +432,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [4 5 6 7 12 13 14 15 4 5 6 7 12 13 14 15]
v2 = shuffle v0, v1, [4 5 6 7 12 13 14 15 4 5 6 7 12 13 14 15]
return v2
}
@@ -442,7 +442,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [2 3 6 7 10 11 14 15 2 3 6 7 10 11 14 15]
v2 = shuffle v0, v1, [2 3 6 7 10 11 14 15 2 3 6 7 10 11 14 15]
return v2
}
@@ -452,7 +452,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_xx(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [1 3 5 7 9 11 13 15 1 3 5 7 9 11 13 15]
v2 = shuffle v0, v1, [1 3 5 7 9 11 13 15 1 3 5 7 9 11 13 15]
return v2
}
@@ -462,7 +462,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkg_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [20 21 22 23 28 29 30 31 20 21 22 23 28 29 30 31]
v2 = shuffle v0, v1, [20 21 22 23 28 29 30 31 20 21 22 23 28 29 30 31]
return v2
}
@@ -472,7 +472,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkf_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [18 19 22 23 26 27 30 31 18 19 22 23 26 27 30 31]
v2 = shuffle v0, v1, [18 19 22 23 26 27 30 31 18 19 22 23 26 27 30 31]
return v2
}
@@ -482,7 +482,7 @@ block0(v0: i8x16, v1: i8x16):
function %shuffle_vpkh_yy(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle.i8x16 v0, v1, [17 19 21 23 25 27 29 31 17 19 21 23 25 27 29 31]
v2 = shuffle v0, v1, [17 19 21 23 25 27 29 31 17 19 21 23 25 27 29 31]
return v2
}


@@ -158,83 +158,74 @@ function u0:31(i64, i32, i32, i8, i8) -> i32, i32 system_v {
block9:
@000d v37 = atomic_cas.i32 v34, v35, v36
@000d v38 = icmp eq v37, v35
@000d v39 = bint.i8 v38
@000d jump block10
block10:
@000e jump block32(v37, v39)
@000e jump block32(v37, v38)
block11:
@0012 v43 = atomic_cas.i32 v40, v41, v42
@0012 v44 = icmp eq v43, v41
@0012 v45 = bint.i8 v44
@0012 jump block12
block12:
@0013 jump block32(v43, v45)
@0013 jump block32(v43, v44)
block13:
@0017 v49 = atomic_cas.i32 v46, v47, v48
@0017 v50 = icmp eq v49, v47
@0017 v51 = bint.i8 v50
@0017 jump block14
block14:
@0018 jump block32(v49, v51)
@0018 jump block32(v49, v50)
block15:
@001c v55 = atomic_cas.i32 v52, v53, v54
@001c v56 = icmp eq v55, v53
@001c v57 = bint.i8 v56
@001c jump block16
block16:
@001d jump block32(v55, v57)
@001d jump block32(v55, v56)
block17:
@0021 v61 = atomic_cas.i32 v58, v59, v60
@0021 v62 = icmp eq v61, v59
@0021 v63 = bint.i8 v62
@0021 jump block18
block18:
@0022 jump block32(v61, v63)
@0022 jump block32(v61, v62)
block19:
@0026 v67 = atomic_cas.i32 v64, v65, v66
@0026 v68 = icmp eq v67, v65
@0026 v69 = bint.i8 v68
@0026 jump block20
block20:
@0027 jump block32(v67, v69)
@0027 jump block32(v67, v68)
block21:
@002b v73 = atomic_cas.i32 v70, v71, v72
@002b v74 = icmp eq v73, v71
@002b v75 = bint.i8 v74
@002b jump block22
block22:
@002c jump block32(v73, v75)
@002c jump block32(v73, v74)
block23:
@0030 v79 = atomic_cas.i32 v76, v77, v78
@0030 v80 = icmp eq v79, v77
@0030 v81 = bint.i8 v80
@0030 jump block24
block24:
@0031 jump block32(v79, v81)
@0031 jump block32(v79, v80)
block25:
@0035 v85 = atomic_cas.i32 v82, v83, v84
@0035 v86 = icmp eq v85, v83
@0035 v87 = bint.i8 v86
@0035 jump block26
block26:
@0036 jump block32(v85, v87)
@0036 jump block32(v85, v86)
block27:
@0038 v88 = global_value.i64 gv2


@@ -10,7 +10,7 @@ function u0:31() -> i32, i32 system_v {
v0 = iconst.i64 0
v1 = iconst.i32 0
v2 = iconst.i32 0
@0004 v28 = bconst.b1 false
@0004 v28 = iconst.i8 0
@0005 brnz v28, block25
jump block1


@@ -126,16 +126,16 @@ block2:
; popq %rbp
; ret
function %f4(f32, f32) -> b1 {
function %f4(f32, f32) -> i8 {
block0(v0: f32, v1: f32):
v2 = fcmp eq v0, v1
brz v2, block1
jump block2
block1:
v3 = bconst.b1 true
v3 = iconst.i8 1
return v3
block2:
v4 = bconst.b1 false
v4 = iconst.i8 0
return v4
}
@@ -156,16 +156,16 @@ block2:
; popq %rbp
; ret
function %f4(f32, f32) -> b1 {
function %f4(f32, f32) -> i8 {
block0(v0: f32, v1: f32):
v2 = fcmp ne v0, v1
brz v2, block1
jump block2
block1:
v3 = bconst.b1 true
v3 = iconst.i8 1
return v3
block2:
v4 = bconst.b1 false
v4 = iconst.i8 0
return v4
}
@@ -187,18 +187,18 @@ block2:
; ret
function %f5(i32) -> b1 {
function %f5(i32) -> i8 {
jt0 = jump_table [block1, block2]
block0(v0: i32):
br_table v0, block1, jt0
block1:
v1 = bconst.b1 true
v1 = iconst.i8 1
return v1
block2:
v2 = bconst.b1 false
v2 = iconst.i8 0
return v2
}
@@ -222,17 +222,17 @@ block2:
; popq %rbp
; ret
function %f6(i64) -> b1 {
function %f6(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 0
v2 = icmp slt v0, v1
brnz v2, block1
jump block2
block1:
v3 = bconst.b1 true
v3 = iconst.i8 1
return v3
block2:
v4 = bconst.b1 false
v4 = iconst.i8 0
return v4
}
@@ -252,17 +252,17 @@ block2:
; popq %rbp
; ret
function %f7(i32) -> b1 {
function %f7(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 0
v2 = icmp slt v0, v1
brnz v2, block1
jump block2
block1:
v3 = bconst.b1 true
v3 = iconst.i8 1
return v3
block2:
v4 = bconst.b1 false
v4 = iconst.i8 0
return v4
}


@@ -5,7 +5,7 @@ function %f0(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):
v2 = load.i64 v1
v3 = icmp eq v0, v2
v4 = bint.i64 v3
v4 = uextend.i64 v3
v5 = select.i64 v3, v0, v1
return v4, v5
}
@@ -16,7 +16,7 @@ block0(v0: i64, v1: i64):
; movq 0(%rsi), %r11
; cmpq %r11, %rdi
; setz %al
; andq %rax, $1, %rax
; movzbq %al, %rax
; cmpq %r11, %rdi
; movq %rsi, %rdx
; cmovzq %rdi, %rdx, %rdx
@@ -28,7 +28,7 @@ function %f1(f64, i64) -> i64, f64 {
block0(v0: f64, v1: i64):
v2 = load.f64 v1
v3 = fcmp eq v0, v2
v4 = bint.i64 v3
v4 = uextend.i64 v3
v5 = select.f64 v3, v0, v0
return v4, v5
}
@@ -38,10 +38,10 @@ block0(v0: f64, v1: i64):
; block0:
; movsd 0(%rdi), %xmm11
; ucomisd %xmm11, %xmm0
; setnp %al
; setz %cl
; andl %eax, %ecx, %eax
; andq %rax, $1, %rax
; setnp %cl
; setz %dl
; andl %ecx, %edx, %ecx
; movzbq %cl, %rax
; ucomisd %xmm0, %xmm11
; movdqa %xmm0, %xmm12
; mov z, sd; j%xmm0 $next; mov%xmm12 %xmm12, %xmm12; $next:


@@ -1,8 +1,8 @@
test compile precise-output
target x86_64
function %f0(b1, i32, i32) -> i32 {
block0(v0: b1, v1: i32, v2: i32):
function %f0(i8, i32, i32) -> i32 {
block0(v0: i8, v1: i32, v2: i32):
v3 = select.i32 v0, v1, v2
return v3
}
@@ -10,15 +10,15 @@ block0(v0: b1, v1: i32, v2: i32):
; pushq %rbp
; movq %rsp, %rbp
; block0:
; testb $1, %dil
; testb %dil, %dil
; movq %rdx, %rax
; cmovnzl %esi, %eax, %eax
; movq %rbp, %rsp
; popq %rbp
; ret
function %f1(b1) -> i32 {
block0(v0: b1):
function %f1(i8) -> i32 {
block0(v0: i8):
brnz v0, block1
jump block2
block1:
@@ -32,7 +32,7 @@ block2:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; testb $1, %dil
; testb %dil, %dil
; jnz label1; j label2
; block1:
; movl $1, %eax
@@ -45,8 +45,8 @@ block2:
; popq %rbp
; ret
function %f2(b1) -> i32 {
block0(v0: b1):
function %f2(i8) -> i32 {
block0(v0: i8):
brz v0, block1
jump block2
block1:
@@ -60,7 +60,7 @@ block2:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; testb $1, %dil
; testb %dil, %dil
; jz label1; j label2
; block1:
; movl $1, %eax
@@ -137,7 +137,7 @@ block2:
; popq %rbp
; ret
function %test_x_slt_0_i64(i64) -> b1 {
function %test_x_slt_0_i64(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 0
v2 = icmp slt v0, v1
@@ -153,7 +153,7 @@ block0(v0: i64):
; popq %rbp
; ret
function %test_x_slt_0_i32f4(i32) -> b1 {
function %test_x_slt_0_i32f4(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 0
v2 = icmp slt v0, v1
@@ -169,7 +169,7 @@ block0(v0: i32):
; popq %rbp
; ret
function %test_0_sgt_x_i64(i64) -> b1 {
function %test_0_sgt_x_i64(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 0
v2 = icmp sgt v1, v0
@@ -185,7 +185,7 @@ block0(v0: i64):
; popq %rbp
; ret
function %test_0_sgt_x_i32f4(i32) -> b1 {
function %test_0_sgt_x_i32f4(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 0
v2 = icmp sgt v1, v0
@@ -201,7 +201,7 @@ block0(v0: i32):
; popq %rbp
; ret
function %test_0_sle_x_i64(i64) -> b1 {
function %test_0_sle_x_i64(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 0
v2 = icmp sle v1, v0
@@ -218,7 +218,7 @@ block0(v0: i64):
; popq %rbp
; ret
function %test_0_sle_x_i32f4(i32) -> b1 {
function %test_0_sle_x_i32f4(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 0
v2 = icmp sle v1, v0
@@ -235,7 +235,7 @@ block0(v0: i32):
; popq %rbp
; ret
function %test_x_sge_x_i64(i64) -> b1 {
function %test_x_sge_x_i64(i64) -> i8 {
block0(v0: i64):
v1 = iconst.i64 0
v2 = icmp sge v0, v1
@@ -252,7 +252,7 @@ block0(v0: i64):
; popq %rbp
; ret
function %test_x_sge_x_i32f4(i32) -> b1 {
function %test_x_sge_x_i32f4(i32) -> i8 {
block0(v0: i32):
v1 = iconst.i32 0
v2 = icmp sge v0, v1


@@ -223,9 +223,9 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast {
v394 -> v99
v395 -> v356
@4b666c v16 = icmp sle v14, v15
@4b666c v17 = bint.i32 v16
@4b666c v17 = uextend.i32 v16
@4b6671 v19 = icmp sle v18, v15
@4b6671 v20 = bint.i32 v19
@4b6671 v20 = uextend.i32 v19
@4b6672 v21 = bor v17, v20
@4b6674 brnz v21, block9
@4b6674 jump block10
@@ -272,8 +272,7 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast {
@4b6695 v37 = iadd.i64 v438, v443
@4b6695 v38 = load.f32 little v37+68
@4b6698 v39 = fcmp.f32 gt v32, v38
@4b6698 v40 = bint.i32 v39
@4b669a brnz v40, block14
@4b669a brnz v39, block14
@4b669a jump block15
block15:
@@ -685,7 +684,7 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast {
@4b69f3 v366 = iadd.i64 v438, v534
@4b69f3 v367 = load.f32 little v366+68
@4b69f6 v368 = fcmp gt v362, v367
@4b69f6 v369 = bint.i32 v368
@4b69f6 v369 = uextend.i32 v368
@4b69f9 v371 = bxor v369, v468
@4b69fb brnz v371, block71
@4b69fb jump block72

View File

@@ -159,7 +159,7 @@ block0(v0: i128):
; popq %rbp
; ret
function %f9(i128, i128) -> b1 {
function %f9(i128, i128) -> i8 {
block0(v0: i128, v1: i128):
v2 = icmp eq v0, v1
v3 = icmp ne v0, v1
@@ -457,17 +457,16 @@ block0(v0: i128):
; popq %rbp
; ret
function %f18(b1) -> i128 {
block0(v0: b1):
v1 = bint.i128 v0
function %f18(i8) -> i128 {
block0(v0: i8):
v1 = uextend.i128 v0
return v1
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; andq %rax, $1, %rax
; movzbq %dil, %rax
; xorq %rdx, %rdx, %rdx
; movq %rbp, %rsp
; popq %rbp
@@ -655,8 +654,8 @@ block0(v0: i64):
; popq %rbp
; ret
function %f23(i128, b1) -> i128 {
block0(v0: i128, v1: b1):
function %f23(i128, i8) -> i128 {
block0(v0: i128, v1: i8):
v2 = iconst.i128 0
brnz v1, block1(v2)
jump block2(v2)
@@ -675,7 +674,7 @@ block2(v6: i128):
; pushq %rbp
; movq %rsp, %rbp
; block0:
; testb $1, %dl
; testb %dl, %dl
; jnz label1; j label2
; block1:
; xorq %rax, %rax, %rax

View File

@@ -126,7 +126,7 @@ function %cmp_mem(i64) -> i64 {
block0(v0: i64):
v1 = load.i64 v0
v2 = icmp eq v0, v1
v3 = bint.i64 v2
v3 = uextend.i64 v2
return v3
}
@@ -134,8 +134,8 @@ block0(v0: i64):
; movq %rsp, %rbp
; block0:
; cmpq 0(%rdi), %rdi
; setz %al
; andq %rax, $1, %rax
; setz %r8b
; movzbq %r8b, %rax
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -2,14 +2,14 @@ test compile precise-output
set enable_simd
target x86_64 skylake
function %move_registers(i32x4) -> b8x16 {
function %move_registers(i32x4) -> i8x16 {
block0(v0: i32x4):
;; In the x64 backend, all of these pseudo-instructions are lowered to moves between registers (e.g. MOVAPD, MOVDQA,
;; etc.). Because these have been marked as moves, no instructions are emitted by this function besides the prologue
;; and epilogue.
v1 = raw_bitcast.f32x4 v0
v2 = raw_bitcast.f64x2 v1
v3 = raw_bitcast.b8x16 v2
v3 = raw_bitcast.i8x16 v2
return v3
}

View File

@@ -1,9 +1,9 @@
test compile precise-output
target x86_64
function %f0(b8) -> b64 {
block0(v0: b8):
v1 = bextend.b64 v0
function %f0(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
return v1
}

View File

@@ -150,8 +150,8 @@ block0:
; popq %rbp
; ret
function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 {
block0(v0: b16x8, v1: i16x8, v2: i16x8):
function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8, v2: i16x8):
v3 = vselect v0, v1, v2
return v3
}
@@ -166,8 +166,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8):
; popq %rbp
; ret
function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 {
block0(v0: b32x4, v1: f32x4, v2: f32x4):
function %vselect_f32x4(i32x4, f32x4, f32x4) -> f32x4 {
block0(v0: i32x4, v1: f32x4, v2: f32x4):
v3 = vselect v0, v1, v2
return v3
}
@@ -182,8 +182,8 @@ block0(v0: b32x4, v1: f32x4, v2: f32x4):
; popq %rbp
; ret
function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 {
block0(v0: b64x2, v1: f64x2, v2: f64x2):
function %vselect_f64x2(i64x2, f64x2, f64x2) -> f64x2 {
block0(v0: i64x2, v1: f64x2, v2: f64x2):
v3 = vselect v0, v1, v2
return v3
}

View File

@@ -2,7 +2,7 @@ test compile precise-output
set enable_simd
target x86_64 skylake
function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ne_32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ne v0, v1
return v2
@@ -18,7 +18,7 @@ block0(v0: i32x4, v1: i32x4):
; popq %rbp
; ret
function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 {
function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ugt v0, v1
return v2
@@ -35,7 +35,7 @@ block0(v0: i32x4, v1: i32x4):
; popq %rbp
; ret
function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 {
function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp sge v0, v1
return v2
@@ -51,7 +51,7 @@ block0(v0: i16x8, v1: i16x8):
; popq %rbp
; ret
function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 {
function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp uge v0, v1
return v2

View File

@@ -80,17 +80,17 @@ block0(v0: i8):
; popq %rbp
; ret
function %splat_b16() -> b16x8 {
function %splat_i16() -> i16x8 {
block0:
v0 = bconst.b16 true
v1 = splat.b16x8 v0
v0 = iconst.i16 -1
v1 = splat.i16x8 v0
return v1
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl $65535, %edi
; movl $-1, %edi
; uninit %xmm5
; pinsrw $0, %xmm5, %rdi, %xmm5
; pinsrw $1, %xmm5, %rdi, %xmm5

View File

@@ -2,8 +2,8 @@ test compile precise-output
set enable_simd
target x86_64 skylake
function %bnot_b32x4(b32x4) -> b32x4 {
block0(v0: b32x4):
function %bnot_i32x4(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = bnot v0
return v1
}
@@ -17,8 +17,8 @@ block0(v0: b32x4):
; popq %rbp
; ret
function %vany_true_b32x4(b32x4) -> b1 {
block0(v0: b32x4):
function %vany_true_i32x4(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vany_true v0
return v1
}
@@ -32,7 +32,7 @@ block0(v0: b32x4):
; popq %rbp
; ret
function %vall_true_i64x2(i64x2) -> b1 {
function %vall_true_i64x2(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vall_true v0
return v1