cranelift: Merge all run tests into runtests dir

With this change we now reuse the same test files across multiple target architectures.

Duplicate tests were merged into the same file where possible.
Some legacy x86 tests were left in separate files due to incompatibilities with the rest of the test suite.
This commit is contained in:
Afonso Bordado
2021-06-03 20:01:38 +01:00
parent e25bf362ab
commit 214755c6a0
43 changed files with 618 additions and 1115 deletions

View File

@@ -1,78 +0,0 @@
test run
target aarch64
; i128 tests
; TODO: Cleanup these tests when we have native support for i128 immediates in CLIF's parser
; Each function takes/returns an i128 as a (low, high) pair of i64 halves,
; recombined with iconcat and split back with isplit, because the parser
; cannot yet express i128 values directly.
function %i128_const_0() -> i64, i64 {
block0:
v1 = iconst.i128 0
v2, v3 = isplit v1
return v2, v3
}
; run: %i128_const_0() == [0, 0]
; 128-bit addition, including carry propagation across the 64-bit halves.
function %add_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = iadd v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %add_i128(0, 0, 0, 0) == [0, 0]
; run: %add_i128(0, -1, -1, 0) == [-1, -1]
; run: %add_i128(1, 0, 0, 0) == [1, 0]
; run: %add_i128(1, 0, 1, 0) == [2, 0]
; run: %add_i128(1, 0, -1, -1) == [0, 0]
; run: %add_i128(-1, 0, 1, 0) == [0, 1]
; run: %add_i128(0x01234567_89ABCDEF, 0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210) == [-1, -1]
; run: %add_i128(0x06060606_06060606, 0xA00A00A0_0A00A00A, 0x30303030_30303030, 0x0BB0BB0B_B0BB0BB0) == [0x36363636_36363636, 0xABBABBAB_BABBABBA]
; run: %add_i128(0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111, 0x1DCB1111_1DCB1111) == [0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF]
; 128-bit subtraction, including borrow propagation across the 64-bit halves.
function %sub_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = isub v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %sub_i128(0, 0, 0, 0) == [0, 0]
; run: %sub_i128(1, 0, 1, 0) == [0, 0]
; run: %sub_i128(1, 0, 0, 0) == [1, 0]
; run: %sub_i128(0, 0, 1, 0) == [-1, -1]
; run: %sub_i128(0, 0, -1, -1) == [1, 0]
; run: %sub_i128(-1, -1, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210) == [0x01234567_89ABCDEF, 0x01234567_89ABCDEF]
; run: %sub_i128(0x36363636_36363636, 0xABBABBAB_BABBABBA, 0x30303030_30303030, 0x0BB0BB0B_B0BB0BB0) == [0x06060606_06060606, 0xA00A00A0_0A00A00A]
; run: %sub_i128(0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111, 0x1DCB1111_1DCB1111) == [0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE]
; 128-bit multiplication; the result is truncated to the low 128 bits.
function %mul_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = imul v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %mul_i128(0, 0, 0, 0) == [0, 0]
; run: %mul_i128(1, 0, 1, 0) == [1, 0]
; run: %mul_i128(1, 0, 0, 0) == [0, 0]
; run: %mul_i128(0, 0, 1, 0) == [0, 0]
; run: %mul_i128(2, 0, 1, 0) == [2, 0]
; run: %mul_i128(2, 0, 2, 0) == [4, 0]
; run: %mul_i128(1, 0, -1, -1) == [-1, -1]
; run: %mul_i128(2, 0, -1, -1) == [-2, -1]
; run: %mul_i128(0x01010101_01010101, 0x01010101_01010101, 13, 0) == [0x0D0D0D0D_0D0D0D0D, 0x0D0D0D0D_0D0D0D0D]
; run: %mul_i128(13, 0, 0x01010101_01010101, 0x01010101_01010101) == [0x0D0D0D0D_0D0D0D0D, 0x0D0D0D0D_0D0D0D0D]
; run: %mul_i128(0x00000000_01234567, 0x89ABCDEF_00000000, 0x00000000_FEDCBA98, 0x76543210_00000000) == [0x0121FA00_23E20B28, 0xE2946058_00000000]
; run: %mul_i128(0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF) == [0xDB6B1E48_19BA1112, 0x5ECD38B5_9D1C2B7E]

View File

@@ -1,12 +0,0 @@
test run
target x86_64 machinst
; Smoke test for 32-bit integer equality comparison on the new (machinst) x86_64 backend.
; A bare `; run` directive invokes the preceding zero-argument `-> b1` function
; and expects it to return true (filetest convention).
function %test_compare_i32() -> b1 {
block0:
v0 = iconst.i32 42
v1 = iconst.i32 42
v2 = icmp eq v0, v1
return v2
}
; run

View File

@@ -1,166 +0,0 @@
test run
target x86_64 machinst
; i128 shift and rotate tests. Operands are passed as (low, high) i64 pairs
; and recombined with iconcat; the shift amount is either an i8 (first group)
; or widened to a full i128 (second group, `_amt_i128` variants).
; NOTE(review): the amount-128 cases expect the input unchanged, i.e. the
; shift amount appears to be taken modulo the type width — confirm against
; the CLIF ishl/ushr/sshr semantics.
function %ishl(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = ishl.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %ishl(0x01010101_01010101, 0x01010101_01010101, 2) == [0x04040404_04040404, 0x04040404_04040404]
; run: %ishl(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020200, 0x02020202_02020202]
; run: %ishl(0x01010101_01010101, 0xffffffff_ffffffff, 66) == [0x00000000_00000000, 0x04040404_04040404]
; run: %ishl(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ishl(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ishl(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ishl(0x12340000_00000000, 0x56780000_00000000, 64) == [0x00000000_00000000, 0x12340000_00000000]
; run: %ishl(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_00000000, 0x00000000_12340000]
; Logical (zero-filling) right shift.
function %ushr(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = ushr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %ushr(0x01010101_01010101, 0x01010101_01010101, 2) == [0x40404040_40404040, 0x00404040_40404040]
; run: %ushr(0x01010101_01010101, 0x01010101_01010101, 66) == [0x00404040_40404040, 0x00000000_00000000]
; run: %ushr(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ushr(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ushr(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ushr(0x12340000_00000000, 0x56780000_00000000, 64) == [0x56780000_00000000, 0x00000000_00000000]
; run: %ushr(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_12340000, 0x00000000_56780000]
; Arithmetic (sign-extending) right shift.
function %sshr(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = sshr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %sshr(0x01010101_01010101, 0x81010101_01010101, 2) == [0x40404040_40404040, 0xe0404040_40404040]
; run: %sshr(0x12345678_9abcdef0, 0x80101010_10101010, 66) == [0xe0040404_04040404, 0xffffffff_ffffffff]
; run: %sshr(0x12345678_9abcdef0, 0x80101010_10101010, 0) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr(0x12345678_9abcdef0, 0x80101010_10101010, 128) == [0x12345678_9abcdef0, 0x80101010_10101010]
; 128-bit rotate left.
function %rotl(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = rotl.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %rotl(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x01010101_01010101, 73) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
; 128-bit rotate right.
function %rotr(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = rotr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %rotr(0x01010101_01010101, 0x01010101_01010101, 9) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr(0x01010101_01010101, 0x01010101_01010101, 73) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotr(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
; i128 amount operand tests
; Same operations as above, but the shift amount is zero-extended to i64 and
; duplicated into both halves of an i128 amount operand. The duplicated high
; half exercises the lowering's handling of amount bits beyond the low byte.
function %ishl_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = ishl.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %ishl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 2) == [0x04040404_04040404, 0x04040404_04040404]
; run: %ishl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020200, 0x02020202_02020202]
; run: %ishl_amt_i128(0x01010101_01010101, 0xffffffff_ffffffff, 66) == [0x00000000_00000000, 0x04040404_04040404]
; run: %ishl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_amt_i128(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ishl_amt_i128(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ishl_amt_i128(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ishl_amt_i128(0x12340000_00000000, 0x56780000_00000000, 64) == [0x00000000_00000000, 0x12340000_00000000]
; run: %ishl_amt_i128(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_00000000, 0x00000000_12340000]
function %ushr_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = ushr.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %ushr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 2) == [0x40404040_40404040, 0x00404040_40404040]
; run: %ushr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 66) == [0x00404040_40404040, 0x00000000_00000000]
; run: %ushr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_amt_i128(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ushr_amt_i128(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ushr_amt_i128(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ushr_amt_i128(0x12340000_00000000, 0x56780000_00000000, 64) == [0x56780000_00000000, 0x00000000_00000000]
; run: %ushr_amt_i128(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_12340000, 0x00000000_56780000]
function %sshr_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = sshr.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %sshr_amt_i128(0x01010101_01010101, 0x81010101_01010101, 2) == [0x40404040_40404040, 0xe0404040_40404040]
; run: %sshr_amt_i128(0x12345678_9abcdef0, 0x80101010_10101010, 66) == [0xe0040404_04040404, 0xffffffff_ffffffff]
; run: %sshr_amt_i128(0x12345678_9abcdef0, 0x80101010_10101010, 0) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr_amt_i128(0x12345678_9abcdef0, 0x80101010_10101010, 128) == [0x12345678_9abcdef0, 0x80101010_10101010]
function %rotl_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = rotl.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %rotl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 73) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
function %rotr_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = rotr.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %rotr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 73) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr_amt_i128(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotr_amt_i128(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]

View File

@@ -1,59 +0,0 @@
test run
set enable_simd
target x86_64 machinst
; SIMD boolean/logical operation tests: bnot, band_not, vany_true, vall_true.
; Each zero-argument function returns b1 (or a lane bool) and a bare `; run`
; expects a true result.
function %bnot() -> b32 {
block0:
v0 = vconst.b32x4 [true true true false]
v1 = bnot v0
v2 = extractlane v1, 3
return v2
}
; run
; band_not computes v0 AND (NOT v1); with v1 all-zero, lane 0 stays 1.
function %band_not() -> b1 {
block0:
v0 = vconst.i16x8 [1 0 0 0 0 0 0 0]
v1 = vconst.i16x8 [0 0 0 0 0 0 0 0]
v2 = band_not v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 1
return v4
}
; run
function %vany_true_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [1 0 0 0 0 0 0 0]
v1 = vany_true v0
return v1
}
; run
; All-false vector: vany_true must be false, so bint yields 0.
function %vany_true_b32x4() -> b1 {
block0:
v0 = vconst.b32x4 [false false false false]
v1 = vany_true v0
v2 = bint.i32 v1
v3 = icmp_imm eq v2, 0
return v3
}
; run
; Only one non-zero lane: vall_true must be false, so bint yields 0.
function %vall_true_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [1 0 0 0 0 0 0 0]
v1 = vall_true v0
v2 = bint.i32 v1
v3 = icmp_imm eq v2, 0
return v3
}
; run
function %vall_true_b32x4() -> b1 {
block0:
v0 = vconst.b32x4 [true true true true]
v1 = vall_true v0
return v1
}
; run

View File

@@ -1,10 +0,0 @@
test run
target x86_64 legacy
; this verifies that returning b64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911
; The [-, %r10] annotation pins the bconst result into %r10, reproducing the
; register placement from the original crash report.
function %test_b64() -> b64 {
block0:
[-, %r10] v0 = bconst.b64 true
return v0
}
; run

View File

@@ -1,46 +0,0 @@
test run
target x86_64 legacy
; bitrev.i128 tests: i128 values are built from two i64 halves via iconcat,
; bit-reversed, and compared against the expected reversed constant.
; Reversing all-zero bits is the identity.
function %reverse_bits_zero() -> b1 {
block0:
v0 = iconst.i64 0
v1 = iconcat v0, v0
v2 = bitrev.i128 v1
v3 = icmp eq v2, v1
return v3
}
; run
; Bit 127 (MSB of the high half) reversed becomes bit 0... and vice versa:
; here the top bit set maps to an i128 with only the low half's MSB pattern.
function %reverse_bits_one() -> b1 {
block0:
v0 = iconst.i64 0
v1 = iconst.i64 1
v2 = iconcat v0, v1
v3 = bitrev.i128 v2
v4 = iconst.i64 0x8000_0000_0000_0000
v5 = iconst.i64 0
v6 = iconcat v4, v5
v7 = icmp eq v3, v6
return v7
}
; run
; General pattern: reversing swaps the halves and mirrors the bits within each.
function %reverse_bits() -> b1 {
block0:
v0 = iconst.i64 0x06AD_8667_69EC_41BA
v1 = iconst.i64 0x6C83_D81A_6E28_83AB
v2 = iconcat v0, v1
v3 = bitrev.i128 v2
v4 = iconst.i64 0xD5C11476581BC136
v5 = iconst.i64 0x5D823796E661B560
v6 = iconcat v4, v5
v7 = icmp eq v3, v6
return v7
}
; run

View File

@@ -1,14 +0,0 @@
test binemit
test run
target x86_64 legacy
; Combined binemit + run test: the `; bin:` annotations pin the exact machine
; encodings for bconst/bnot/band on the legacy backend, while `; run` also
; executes the function and expects a true result.
function u0:323() -> b1 {
block0:
[-,%rax] v221 = bconst.b1 false ; bin: 40 b8 00000000
[-,%rcx] v222 = bconst.b1 true ; bin: 40 b9 00000001
[-,%rax] v223 = bnot v221 ; bin: 40 f7 d0
[-,%rax] v224 = band v223, v222 ; bin: 40 21 c8
return v224
}
; run

View File

@@ -1,94 +0,0 @@
test run
target x86_64 legacy haswell
; i128 comparison tests (icmp / icmp_imm) on the legacy x86_64 backend.
; i128 operands are assembled from i64 halves with iconcat.
function %test_icmp_eq_i128() -> b1 {
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
v21 = iconst.i64 0x0
v22 = iconst.i64 0x0
v2 = iconcat v21, v22
v10 = icmp.i128 eq v1, v2
return v10
}
; run
function %test_icmp_imm_eq_i128() -> b1 {
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
v10 = icmp_imm.i128 eq v1, 0x0
return v10
}
; run
; Values differ only in the high half — ne must still detect it.
function %test_icmp_ne_i128() -> b1 {
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
v21 = iconst.i64 0x0
v22 = iconst.i64 0x1
v2 = iconcat v21, v22
v10 = icmp.i128 ne v1, v2
return v10
}
; run
function %test_icmp_imm_ne_i128() -> b1 {
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
v10 = icmp_imm.i128 ne v1, 0x1
return v10
}
; run
; "nz" variants repeat the comparisons with non-zero operands.
function %test_icmp_nz_eq_i128() -> b1 {
block0:
v11 = iconst.i64 0x1
v12 = iconst.i64 0x1
v1 = iconcat v11, v12
v21 = iconst.i64 0x1
v22 = iconst.i64 0x1
v2 = iconcat v21, v22
v10 = icmp.i128 eq v1, v2
return v10
}
; run
; v2 has the larger high half, so v2 ugt v1 must hold.
function %test_icmp_nz_gt_i128() -> b1 {
block0:
v11 = iconst.i64 0x1
v12 = iconst.i64 0x1
v1 = iconcat v11, v12
v21 = iconst.i64 0x1
v22 = iconst.i64 0x2
v2 = iconcat v21, v22
v10 = icmp.i128 ugt v2, v1
return v10
}
; run
function %test_icmp_nz_ge_i128() -> b1 {
block0:
v11 = iconst.i64 0x1
v12 = iconst.i64 0x1
v1 = iconcat v11, v12
v21 = iconst.i64 0x1
v22 = iconst.i64 0x1
v2 = iconcat v21, v22
v10 = icmp.i128 uge v1, v2
return v10
}
; run

View File

@@ -1,12 +0,0 @@
test run
target x86_64 legacy
; Smoke test for 32-bit integer equality comparison on the legacy x86_64
; backend (same test body as the machinst variant elsewhere in this commit).
function %test_compare_i32() -> b1 {
block0:
v0 = iconst.i32 42
v1 = iconst.i32 42
v2 = icmp eq v0, v1
return v2
}
; run

View File

@@ -1,17 +0,0 @@
; Test i64 instructions on x86_32.
; Compile-only test: the `; check:` directives assert that 64-bit add/sub on
; a 32-bit target are legalized into the flag-carrying pair instructions
; (iadd_ifcout / isub_ifbout) rather than executed.
test compile
target i686 legacy haswell
function %iadd(i64, i64) -> i64 {
block0(v1: i64, v2: i64):
v10 = iadd v1, v2
; check: iadd_ifcout
return v10
}
function %isub(i64, i64) -> i64 {
block0(v1: i64, v2: i64):
v10 = isub v1, v2
; check: isub_ifbout
return v10
}

View File

@@ -1,279 +0,0 @@
test run
set enable_simd
target x86_64 legacy skylake
; SIMD integer arithmetic tests (add, wrapping overflow, sub, neg, mul)
; on the legacy x86_64 backend.
function %iadd_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0:i32x4, v1:i32x4):
v2 = iadd v0, v1
return v2
}
; run: %iadd_i32x4([1 1 1 1], [1 2 3 4]) == [2 3 4 5]
; Per-lane wrapping: 255 + 2 == 1 in each i8 lane.
function %iadd_i8x16_with_overflow() -> i8x16 {
block0:
v0 = vconst.i8x16 [255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255]
v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2]
v2 = iadd v0, v1
return v2
}
; run: %iadd_i8x16_with_overflow() == [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
; Pins operands into high XMM registers (xmm10/xmm15) to exercise REX-prefixed
; encodings of the subtract.
function %isub_i32x4_rex() -> b1 {
block0:
[-,%xmm10] v0 = vconst.i32x4 [1 1 1 1]
[-,%xmm15] v1 = vconst.i32x4 [1 2 3 4]
v2 = isub v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0
v5 = extractlane v2, 1
v6 = icmp_imm eq v5, 0xffffffff
; TODO replace extractlanes with vector comparison
v7 = band v4, v6
return v7
}
; run
function %ineg_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [1 1 1 1]
v2 = ineg v0
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, -1
return v4
}
; run
function %imul_i64x2(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
v2 = imul v0, v1
return v2
}
; run: %imul_i64x2([0 2], [0 2]) == [0 4]
; Checks lanes 0, 1 and 3; lane 3 exercises wrapping of the high bit.
function %imul_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [-1 0 1 0x80_00_00_01]
v1 = vconst.i32x4 [2 2 2 2]
v2 = imul v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, -2
v5 = extractlane v2, 1
v6 = icmp_imm eq v5, 0
v7 = extractlane v2, 3
v8 = icmp_imm eq v7, 2 ; 0x80_00_00_01 * 2 == 0x1_00_00_00_02 (and the 1 is dropped)
v9 = band v4, v6
v10 = band v8, v9
return v10
}
; run
; i16x8 multiply: checks lanes 0, 1 and 3 (including 0x7fff * 2 wrapping).
; Fixed to return v10 (the conjunction of all three lane checks); the previous
; `return v4` left v9/v10 dead and only verified lane 0.
function %imul_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 0 1 0x7f_ff 0 0 0 0]
v1 = vconst.i16x8 [2 2 2 2 0 0 0 0]
v2 = imul v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0xfffe ; 0xfffe == -2; -2 will not work here and below because v3 is
; being uextend-ed, not sextend-ed
v5 = extractlane v2, 1
v6 = icmp_imm eq v5, 0
v7 = extractlane v2, 3
v8 = icmp_imm eq v7, 0xfffe ; 0x7f_ff * 2 == 0xff_fe
v9 = band v4, v6
v10 = band v8, v9
return v10
}
; run
; Saturating-add: 127 + 1 clamps at the i8 signed maximum.
function %sadd_sat_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
v2 = sadd_sat v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 127
return v4
}
; run
; Unsigned saturating-add: 0xffff + 0xffff clamps at the u16 maximum.
function %uadd_sat_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0]
v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1]
v2 = uadd_sat v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 65535
return v4
}
; run
; Covers both signed (ssub_sat clamps at -128) and unsigned (usub_sat clamps
; at 0) saturating subtraction, reusing 0x80 in both interpretations.
function %sub_sat_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] ; 128 == 0x80 == -128
v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
v2 = ssub_sat v0, v1
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0x80 ; 0x80 == -128
; now re-use 0x80 as an unsigned 128
v5 = usub_sat v0, v2
v6 = extractlane v5, 0
v7 = icmp_imm eq v6, 0
v8 = band v4, v7
return v8
}
; run
; fadd then fsub should round-trip exactly for these values.
function %add_sub_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0x4.2 0.0 0.0 0.0]
v1 = vconst.f32x4 [0x1.0 0x1.0 0x1.0 0x1.0]
v2 = vconst.f32x4 [0x5.2 0x1.0 0x1.0 0x1.0]
v3 = fadd v0, v1
v4 = fcmp eq v3, v2
v6 = fsub v2, v1
v7 = fcmp eq v6, v0
v8 = band v4, v7
v9 = vall_true v8
return v9
}
; run
; fmul then fdiv should round-trip exactly for these values.
function %mul_div_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0x4.2 -0x2.1 0x2.0 0.0]
v1 = vconst.f32x4 [0x3.4 0x6.7 0x8.9 0xa.b]
v2 = vconst.f32x4 [0xd.68 -0xd.47 0x11.2 0x0.0]
v3 = fmul v0, v1
v4 = fcmp eq v3, v2
v6 = fdiv v2, v1
v7 = fcmp eq v6, v0
v8 = band v4, v7
v9 = vall_true v8
return v9
}
; run
; sqrt(9) == 3 and sqrt(1) == 1 are exact in f64.
function %sqrt_f64x2() -> b1 {
block0:
v0 = vconst.f64x2 [0x9.0 0x1.0]
v1 = sqrt v0
v2 = vconst.f64x2 [0x3.0 0x1.0]
v3 = fcmp eq v2, v1
v4 = vall_true v3
return v4
}
; run
function %fmax_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fmax v0, v1
return v2
}
; note below how NaNs are quieted but (unlike fmin), retain their sign: this discrepancy is allowed by non-determinism
; in the spec, see https://webassembly.github.io/spec/core/bikeshed/index.html#nan-propagation%E2%91%A0.
; run: %fmax_f64x2([-0x0.0 -0x1.0], [+0x0.0 0x1.0]) == [+0x0.0 0x1.0]
; run: %fmax_f64x2([-NaN NaN], [0x0.0 0x100.0]) == [-NaN NaN]
; run: %fmax_f64x2([NaN 0.0], [0.0 0.0]) == [NaN 0.0]
; run: %fmax_f64x2([-NaN 0.0], [0x1.0 0.0]) == [-NaN 0.0]
; run: %fmax_f64x2([NaN:0x42 0.0], [0x1.0 0.0]) == [NaN 0.0]
function %fmin_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = fmin v0, v1
return v2
}
; note below how NaNs are quieted and negative: this is due to non-determinism in the spec for NaNs, see
; https://webassembly.github.io/spec/core/bikeshed/index.html#nan-propagation%E2%91%A0.
; run: %fmin_f64x2([-0x0.0 -0x1.0], [+0x0.0 0x1.0]) == [-0x0.0 -0x1.0]
; run: %fmin_f64x2([-NaN 0x100.0], [0.0 NaN]) == [-NaN -NaN]
; run: %fmin_f64x2([NaN 0.0], [0.0 0.0]) == [-NaN 0.0]
; run: %fmin_f64x2([-NaN 0.0], [0x1.0 0.0]) == [-NaN 0.0]
; run: %fmin_f64x2([NaN:0x42 0.0], [0x1.0 0.0]) == [-NaN 0.0]
function %fneg_f64x2() -> b1 {
block0:
v0 = vconst.f64x2 [0x1.0 -0x1.0]
v1 = fneg v0
v2 = vconst.f64x2 [-0x1.0 0x1.0]
v3 = fcmp eq v1, v2
v4 = vall_true v3
return v4
}
; run
; fneg must flip the sign bit even for zeroes and infinities.
function %fneg_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0x0.0 -0x0.0 -Inf Inf]
v1 = fneg v0
v2 = vconst.f32x4 [-0x0.0 0x0.0 Inf -Inf]
v3 = fcmp eq v1, v2
v4 = vall_true v3
return v4
}
; run
function %fabs_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0x0.0 -0x1.0 0x2.0 -0x3.0]
v1 = fabs v0
v2 = vconst.f32x4 [0x0.0 0x1.0 0x2.0 0x3.0]
v3 = fcmp eq v1, v2
v4 = vall_true v3
return v4
}
; run
; avg_round: unsigned per-lane average rounding half upward.
function %average_rounding_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [0 0 0 1 42 19 -1 0xffff]
v1 = vconst.i16x8 [0 1 2 4 42 18 -1 0]
v2 = vconst.i16x8 [0 1 1 3 42 19 -1 0x8000]
v3 = avg_round v0, v1
v4 = icmp eq v2, v3
v5 = vall_true v4
return v5
}
; run
function %iabs(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iabs v0
return v1
}
; run: %iabs([-42 -1 0 1]) == [42 1 0 1]

View File

@@ -1,247 +0,0 @@
test run
set enable_simd
target x86_64 legacy
function %icmp_eq_i8x16() -> b8 {
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 0x00
v2 = icmp eq v0, v1
v3 = extractlane v2, 0
return v3
}
; run
function %icmp_eq_i64x2() -> b64 {
block0:
v0 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
v2 = icmp eq v0, v1
v3 = extractlane v2, 1
return v3
}
; run
function %icmp_ne_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [0 1 2 3]
v1 = vconst.i32x4 [7 7 7 7]
v2 = icmp ne v0, v1
v3 = vall_true v2
return v3
}
; run
function %icmp_ne_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [0 1 2 3 4 5 6 7]
v1 = vconst.i16x8 [0 1 2 3 4 5 6 7]
v2 = icmp ne v0, v1
v3 = vall_true v2
v4 = bint.i32 v3
v5 = icmp_imm eq v4, 0
return v5
}
; run
function %icmp_sgt_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0]
v1 = vconst.i8x16 [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0xff]
v2 = icmp sgt v0, v1
v3 = raw_bitcast.i8x16 v2
v4 = vconst.i8x16 [0 0 0xff 0 0 0 0 0 0 0 0 0 0 0 0 0xff]
v7 = icmp eq v3, v4
v8 = vall_true v7
return v8
}
; run
function %icmp_sgt_i64x2() -> b1 {
block0:
v0 = vconst.i64x2 [0 -42]
v1 = vconst.i64x2 [-1 -43]
v2 = icmp sgt v0, v1
v8 = vall_true v2
return v8
}
; run
function %maxs_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 will be greater than -1 == 0xff with
; signed max
v1 = vconst.i8x16 [0xff 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
v2 = x86_pmaxs v0, v1
v8 = vall_true v2
return v8
}
; run
function %maxu_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [0 1 1 1 1 1 1 1]
v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] ; -1 == 0xff will be greater with unsigned max
v2 = x86_pmaxu v0, v1
v8 = vall_true v2
return v8
}
; run
function %mins_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [0 1 1 1]
v1 = vconst.i32x4 [-1 1 1 1] ; -1 == 0xff will be less with signed min
v2 = x86_pmins v0, v1
v8 = vall_true v2
return v8
}
; run
function %minu_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 < 2 with unsiged min
v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2]
v2 = x86_pminu v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_ugt_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
v1 = vconst.i8x16 [0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
v2 = icmp ugt v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_sge_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 1 2 3 4 5 6 7]
v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1]
v2 = icmp sge v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_uge_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [1 2 3 4]
v1 = vconst.i32x4 [1 1 1 1]
v2 = icmp uge v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_slt_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [-1 1 1 1]
v1 = vconst.i32x4 [1 2 3 4]
v2 = icmp slt v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_ult_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [1 1 1 1]
v1 = vconst.i32x4 [-1 2 3 4] ; -1 = 0xffff... will be greater than 1 when unsigned
v2 = icmp ult v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_ult_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1]
v1 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1]
v2 = icmp ult v0, v1
v3 = vconst.i16x8 0x00
v4 = raw_bitcast.i16x8 v2
v5 = icmp eq v3, v4
v8 = vall_true v5
return v8
}
; run
function %icmp_sle_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 -1 0 0 0 0 0 0]
v1 = vconst.i16x8 [-1 0 0 0 0 0 0 0]
v2 = icmp sle v0, v1
v8 = vall_true v2
return v8
}
; run
function %icmp_ule_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0]
v1 = vconst.i16x8 [-1 -1 0 0 0 0 0 0]
v2 = icmp ule v0, v1
v8 = vall_true v2
return v8
}
; run
function %fcmp_eq_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0]
v1 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0]
v2 = fcmp eq v0, v1
v8 = vall_true v2
return v8
}
; run
function %fcmp_lt_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [0.0 -0x4.2 0x0.0 -0.0]
v1 = vconst.f32x4 [0x0.001 0x4.2 0x0.33333 0x1.0]
v2 = fcmp lt v0, v1
v8 = vall_true v2
return v8
}
; run
function %fcmp_ge_f64x2() -> b1 {
block0:
v0 = vconst.f64x2 [0x0.0 0x4.2]
v1 = vconst.f64x2 [0.0 0x4.1]
v2 = fcmp ge v0, v1
v8 = vall_true v2
return v8
}
; run
function %fcmp_uno_f64x2() -> b1 {
block0:
v0 = vconst.f64x2 [0.0 NaN]
v1 = vconst.f64x2 [NaN 0x4.1]
v2 = fcmp uno v0, v1
v8 = vall_true v2
return v8
}
; run
function %fcmp_gt_nans_f32x4() -> b1 {
block0:
v0 = vconst.f32x4 [NaN 0x42.0 -NaN NaN]
v1 = vconst.f32x4 [NaN NaN 0x42.0 Inf]
v2 = fcmp gt v0, v1
; now check that the result v2 is all zeroes
v3 = vconst.i32x4 0x00
v4 = raw_bitcast.i32x4 v2
v5 = icmp eq v3, v4
v8 = vall_true v5
return v8
}
; run

View File

@@ -1,14 +0,0 @@
test run
set enable_simd
target x86_64 legacy skylake
; splat: broadcasting an i64 scalar into both lanes of an i64x2 vector.
function %splat_i64x2() -> b1 {
block0:
v0 = iconst.i64 -1
v1 = splat.i64x2 v0
v2 = vconst.i64x2 [-1 -1]
v3 = icmp eq v1, v2
v8 = vall_true v3
return v8
}
; run

View File

@@ -1,39 +0,0 @@
test run
set enable_simd
target x86_64 legacy
function %fcvt_from_sint() -> b1 {
block0:
v0 = vconst.i32x4 [-1 0 1 123456789]
v1 = fcvt_from_sint.f32x4 v0
v2 = vconst.f32x4 [-0x1.0 0.0 0x1.0 0x75bcd18.0] ; 123456789 rounds to 123456792.0, an error of 3
v3 = fcmp eq v1, v2
v4 = vall_true v3
return v4
}
; run
function %fcvt_from_uint(i32x4) -> f32x4 {
block0(v0:i32x4):
v1 = fcvt_from_uint.f32x4 v0
return v1
}
; run: %fcvt_from_uint([0 0 0 0]) == [0x0.0 0x0.0 0x0.0 0x0.0]
function %fcvt_to_sint_sat(f32x4) -> i32x4 {
block0(v0:f32x4):
v1 = fcvt_to_sint_sat.i32x4 v0
return v1
}
; run: %fcvt_to_sint_sat([0x0.0 -0x1.0 0x1.0 0x1.0p100]) == [0 -1 1 0x7FFFFFFF]
; run: %fcvt_to_sint_sat([-0x8.1 0x0.0 0x0.0 -0x1.0p100]) == [-8 0 0 0x80000000]
function %fcvt_to_uint_sat(f32x4) -> i32x4 {
block0(v0:f32x4):
v1 = fcvt_to_uint_sat.i32x4 v0
return v1
}
; run: %fcvt_to_uint_sat([0x1.0 0x4.2 0x4.6 0x1.0p100]) == [1 4 4 0xFFFFFFFF]
; run: %fcvt_to_uint_sat([-0x8.1 -0x0.0 0x0.0 -0x1.0p100]) == [0 0 0 0]
; run: %fcvt_to_uint_sat([0xB2D05E00.0 0.0 0.0 0.0]) == [3000000000 0 0 0]

View File

@@ -1,21 +0,0 @@
test run
set enable_simd
target x86_64 legacy
function %vconst_zeroes() -> b1 {
block0:
v0 = vconst.i8x16 0x00
v1 = extractlane v0, 4
v2 = icmp_imm eq v1, 0
return v2
}
; run
function %vconst_ones() -> b1 {
block0:
v0 = vconst.i8x16 0xffffffffffffffffffffffffffffffff
v1 = extractlane v0, 2
v2 = icmp_imm eq v1, 0xff
return v2
}
; run

View File

@@ -0,0 +1,17 @@
test run
target aarch64
target arm
target s390x
; target x86_64 machinst TODO: Not yet implemented on x86_64
target x86_64 legacy
function %bnot_band() -> b1 {
block0:
v1 = bconst.b1 false
v2 = bconst.b1 true
v3 = bnot v1
v4 = band v3, v2
return v4
}
; run

View File

@@ -1,4 +1,8 @@
test run
target aarch64
target arm
target s390x
target x86_64 machinst
target x86_64 legacy
function u0:0() -> b1 {

View File

@@ -0,0 +1,155 @@
test run
target aarch64
target arm
target s390x
target x86_64 machinst
target x86_64 legacy
; Merged cross-backend constant-materialization tests: iconst for every
; integer width at 0 / 1 / -1, and bconst for every boolean width.
function %i8_iconst_0() -> i8 {
block0:
v1 = iconst.i8 0
return v1
}
; run: %i8_iconst_0() == 0
function %i8_iconst_1() -> i8 {
block0:
v1 = iconst.i8 1
return v1
}
; run: %i8_iconst_1() == 1
function %i8_iconst_neg_one() -> i8 {
block0:
v1 = iconst.i8 -1
return v1
}
; run: %i8_iconst_neg_one() == -1
function %i16_iconst_0() -> i16 {
block0:
v1 = iconst.i16 0
return v1
}
; run: %i16_iconst_0() == 0
function %i16_iconst_1() -> i16 {
block0:
v1 = iconst.i16 1
return v1
}
; run: %i16_iconst_1() == 1
function %i16_iconst_neg_one() -> i16 {
block0:
v1 = iconst.i16 -1
return v1
}
; run: %i16_iconst_neg_one() == -1
function %i32_iconst_0() -> i32 {
block0:
v1 = iconst.i32 0
return v1
}
; run: %i32_iconst_0() == 0
function %i32_iconst_1() -> i32 {
block0:
v1 = iconst.i32 1
return v1
}
; run: %i32_iconst_1() == 1
function %i32_iconst_neg_one() -> i32 {
block0:
v1 = iconst.i32 -1
return v1
}
; run: %i32_iconst_neg_one() == -1
function %i64_iconst_0() -> i64 {
block0:
v1 = iconst.i64 0
return v1
}
; run: %i64_iconst_0() == 0
function %i64_iconst_1() -> i64 {
block0:
v1 = iconst.i64 1
return v1
}
; run: %i64_iconst_1() == 1
function %i64_iconst_neg_one() -> i64 {
block0:
v1 = iconst.i64 -1
return v1
}
; run: %i64_iconst_neg_one() == -1
; Boolean constants for each width.
function %b8_bconst_false() -> b8 {
block0:
v1 = bconst.b8 false
return v1
}
; run: %b8_bconst_false() == false
function %b8_bconst_true() -> b8 {
block0:
v1 = bconst.b8 true
return v1
}
; run: %b8_bconst_true() == true
function %b16_bconst_false() -> b16 {
block0:
v1 = bconst.b16 false
return v1
}
; run: %b16_bconst_false() == false
function %b16_bconst_true() -> b16 {
block0:
v1 = bconst.b16 true
return v1
}
; run: %b16_bconst_true() == true
function %b32_bconst_false() -> b32 {
block0:
v1 = bconst.b32 false
return v1
}
; run: %b32_bconst_false() == false
function %b32_bconst_true() -> b32 {
block0:
v1 = bconst.b32 true
return v1
}
; run: %b32_bconst_true() == true
function %b64_bconst_false() -> b64 {
block0:
v1 = bconst.b64 false
return v1
}
; run: %b64_bconst_false() == false
; this verifies that returning b64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911
function %b64_bconst_true() -> b64 {
block0:
v1 = bconst.b64 true
return v1
}
; run: %b64_bconst_true() == true

View File

@@ -1,4 +1,7 @@
test run
target aarch64
target arm
target s390x
set avoid_div_traps=false
target x86_64 machinst

View File

@@ -1,7 +1,11 @@
test run
target aarch64
target arm
target s390x
; target x86_64 machinst TODO: Not yet implemented on x86_64
target i686 legacy
function u0:0() -> b1 {
function %uextend() -> b1 {
block0:
v0 = iconst.i32 0xffff_ee00
v1 = uextend.i64 v0
@@ -13,7 +17,7 @@ block0:
}
; run
function u0:1() -> b1 {
function %sextend() -> b1 {
block0:
v0 = iconst.i32 0xffff_ee00
v1 = sextend.i64 v0

View File

@@ -0,0 +1,206 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
; TODO: Cleanup these tests when we have native support for i128 immediates in CLIF's parser
; 128-bit wrapping addition, driven through two i64 halves because the CLIF
; parser has no native i128 immediates yet.
; `iconcat v0, v1` builds the i128 from (low, high): the carry cases below
; (e.g. (-1,0)+(1,0) == [0,1]) show the first half is the low 64 bits.
function %add_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = iadd v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %add_i128(0, 0, 0, 0) == [0, 0]
; run: %add_i128(0, -1, -1, 0) == [-1, -1]
; run: %add_i128(1, 0, 0, 0) == [1, 0]
; run: %add_i128(1, 0, 1, 0) == [2, 0]
; run: %add_i128(1, 0, -1, -1) == [0, 0]
; run: %add_i128(-1, 0, 1, 0) == [0, 1]
; run: %add_i128(0x01234567_89ABCDEF, 0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210) == [-1, -1]
; run: %add_i128(0x06060606_06060606, 0xA00A00A0_0A00A00A, 0x30303030_30303030, 0x0BB0BB0B_B0BB0BB0) == [0x36363636_36363636, 0xABBABBAB_BABBABBA]
; run: %add_i128(0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111, 0x1DCB1111_1DCB1111) == [0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF]
; 128-bit wrapping subtraction via i64 halves (low, high); the borrow case
; (0,0)-(1,0) == [-1,-1] shows the borrow propagating into the high half.
function %sub_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = isub v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %sub_i128(0, 0, 0, 0) == [0, 0]
; run: %sub_i128(1, 0, 1, 0) == [0, 0]
; run: %sub_i128(1, 0, 0, 0) == [1, 0]
; run: %sub_i128(0, 0, 1, 0) == [-1, -1]
; run: %sub_i128(0, 0, -1, -1) == [1, 0]
; run: %sub_i128(-1, -1, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210) == [0x01234567_89ABCDEF, 0x01234567_89ABCDEF]
; run: %sub_i128(0x36363636_36363636, 0xABBABBAB_BABBABBA, 0x30303030_30303030, 0x0BB0BB0B_B0BB0BB0) == [0x06060606_06060606, 0xA00A00A0_0A00A00A]
; run: %sub_i128(0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111, 0x1DCB1111_1DCB1111) == [0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE]
; 128-bit wrapping multiplication via i64 halves (low, high); the case
; (2,0) * (-1,-1) == [-2,-1] shows two's-complement wrap-around.
; Fix: the final run line was duplicated verbatim; the redundant copy is removed.
function %mul_i128(i64, i64, i64, i64) -> i64, i64 {
block0(v0: i64,v1: i64,v2: i64,v3: i64):
v4 = iconcat v0, v1
v5 = iconcat v2, v3
v6 = imul v4, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %mul_i128(0, 0, 0, 0) == [0, 0]
; run: %mul_i128(1, 0, 1, 0) == [1, 0]
; run: %mul_i128(1, 0, 0, 0) == [0, 0]
; run: %mul_i128(0, 0, 1, 0) == [0, 0]
; run: %mul_i128(2, 0, 1, 0) == [2, 0]
; run: %mul_i128(2, 0, 2, 0) == [4, 0]
; run: %mul_i128(1, 0, -1, -1) == [-1, -1]
; run: %mul_i128(2, 0, -1, -1) == [-2, -1]
; run: %mul_i128(0x01010101_01010101, 0x01010101_01010101, 13, 0) == [0x0D0D0D0D_0D0D0D0D, 0x0D0D0D0D_0D0D0D0D]
; run: %mul_i128(13, 0, 0x01010101_01010101, 0x01010101_01010101) == [0x0D0D0D0D_0D0D0D0D, 0x0D0D0D0D_0D0D0D0D]
; run: %mul_i128(0x00000000_01234567, 0x89ABCDEF_00000000, 0x00000000_FEDCBA98, 0x76543210_00000000) == [0x0121FA00_23E20B28, 0xE2946058_00000000]
; run: %mul_i128(0xC0FFEEEE_C0FFEEEE, 0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF, 0xDECAFFFF_DECAFFFF) == [0xDB6B1E48_19BA1112, 0x5ECD38B5_9D1C2B7E]
; 128-bit left shift with an i8 shift amount. The 128/129/130 cases below
; show the amount is taken modulo 128 (128 is the identity, 129 acts like 1).
function %ishl_i128_i8(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = ishl.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 2) == [0x04040404_04040404, 0x04040404_04040404]
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020200, 0x02020202_02020202]
; run: %ishl_i128_i8(0x01010101_01010101, 0xffffffff_ffffffff, 66) == [0x00000000_00000000, 0x04040404_04040404]
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_i128_i8(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ishl_i128_i8(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ishl_i128_i8(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ishl_i128_i8(0x12340000_00000000, 0x56780000_00000000, 64) == [0x00000000_00000000, 0x12340000_00000000]
; run: %ishl_i128_i8(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_00000000, 0x00000000_12340000]
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 129) == [0x02020202_02020202, 0x02020202_02020202]
; run: %ishl_i128_i8(0x01010101_01010101, 0x01010101_01010101, 130) == [0x04040404_04040404, 0x04040404_04040404]
; Same left-shift cases as %ishl_i128_i8, but with the shift amount widened to
; an i128 whose halves both hold the zero-extended i8; expected results match
; the i8-amount variant, consistent with the amount being taken modulo 128.
function %ishl_i128_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = uextend.i64 v2
v5 = iconcat v4, v4
v6 = ishl.i128 v3, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 2) == [0x04040404_04040404, 0x04040404_04040404]
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020200, 0x02020202_02020202]
; run: %ishl_i128_i128(0x01010101_01010101, 0xffffffff_ffffffff, 66) == [0x00000000_00000000, 0x04040404_04040404]
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ishl_i128_i128(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ishl_i128_i128(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ishl_i128_i128(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ishl_i128_i128(0x12340000_00000000, 0x56780000_00000000, 64) == [0x00000000_00000000, 0x12340000_00000000]
; run: %ishl_i128_i128(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_00000000, 0x00000000_12340000]
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 129) == [0x02020202_02020202, 0x02020202_02020202]
; run: %ishl_i128_i128(0x01010101_01010101, 0x01010101_01010101, 130) == [0x04040404_04040404, 0x04040404_04040404]
; 128-bit logical (zero-filling) right shift with an i8 amount; the high half
; gains zero bits (e.g. shift by 2 yields 0x00404040_... in the high half),
; and the 128/129/130 cases show the amount is taken modulo 128.
function %ushr_i128_i8(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = ushr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 2) == [0x40404040_40404040, 0x00404040_40404040]
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 66) == [0x00404040_40404040, 0x00000000_00000000]
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_i128_i8(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ushr_i128_i8(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ushr_i128_i8(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ushr_i128_i8(0x12340000_00000000, 0x56780000_00000000, 64) == [0x56780000_00000000, 0x00000000_00000000]
; run: %ushr_i128_i8(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_12340000, 0x00000000_56780000]
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 129) == [0x80808080_80808080, 0x00808080_80808080]
; run: %ushr_i128_i8(0x01010101_01010101, 0x01010101_01010101, 130) == [0x40404040_40404040, 0x00404040_40404040]
; Same logical right-shift cases as %ushr_i128_i8, but with the shift amount
; widened to an i128 whose halves both hold the zero-extended i8; expected
; results match the i8-amount variant.
function %ushr_i128_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = uextend.i64 v2
v5 = iconcat v4, v4
v6 = ushr.i128 v3, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 2) == [0x40404040_40404040, 0x00404040_40404040]
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 66) == [0x00404040_40404040, 0x00000000_00000000]
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 0) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 128) == [0x01010101_01010101, 0x01010101_01010101]
; run: %ushr_i128_i128(0x00000000_00000001, 0x00000000_00000000, 0) == [0x00000000_00000001, 0x00000000_00000000]
; run: %ushr_i128_i128(0x00000000_00000000, 0x00000000_00000001, 0) == [0x00000000_00000000, 0x00000000_00000001]
; run: %ushr_i128_i128(0x12340000_00000000, 0x56780000_00000000, 0) == [0x12340000_00000000, 0x56780000_00000000]
; run: %ushr_i128_i128(0x12340000_00000000, 0x56780000_00000000, 64) == [0x56780000_00000000, 0x00000000_00000000]
; run: %ushr_i128_i128(0x12340000_00000000, 0x56780000_00000000, 32) == [0x00000000_12340000, 0x00000000_56780000]
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 129) == [0x80808080_80808080, 0x00808080_80808080]
; run: %ushr_i128_i128(0x01010101_01010101, 0x01010101_01010101, 130) == [0x40404040_40404040, 0x00404040_40404040]
; 128-bit arithmetic (sign-replicating) right shift with an i8 amount; a set
; sign bit in the high half produces 0xe0.../0xff... fill (first run cases),
; and the 128/129/130 cases show the amount is taken modulo 128.
function %sshr_i128_i8(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = sshr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %sshr_i128_i8(0x01010101_01010101, 0x81010101_01010101, 2) == [0x40404040_40404040, 0xe0404040_40404040]
; run: %sshr_i128_i8(0x00000000_00000000, 0xffffffff_ffffffff, 32) == [0xffffffff_00000000, 0xffffffff_ffffffff]
; run: %sshr_i128_i8(0x80000000_00000000, 0xffffffff_00000000, 32) == [0x00000000_80000000, 0xffffffff_ffffffff]
; run: %sshr_i128_i8(0x12345678_9abcdef0, 0x80101010_10101010, 66) == [0xe0040404_04040404, 0xffffffff_ffffffff]
; run: %sshr_i128_i8(0x00000000_00000000, 0x00000000_00000000, 64) == [0x00000000_00000000, 0x00000000_00000000]
; run: %sshr_i128_i8(0x12345678_9abcdef0, 0x80101010_10101010, 0) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr_i128_i8(0x12345678_9abcdef0, 0x80101010_10101010, 128) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr_i128_i8(0x01010101_01010101, 0x81010101_01010101, 129) == [0x80808080_80808080, 0xc0808080_80808080]
; run: %sshr_i128_i8(0x01010101_01010101, 0x81010101_01010101, 130) == [0x40404040_40404040, 0xe0404040_40404040]
; Same arithmetic right-shift cases as %sshr_i128_i8, but with the shift
; amount widened to an i128 whose halves both hold the zero-extended i8;
; expected results match the i8-amount variant.
function %sshr_i128_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = uextend.i64 v2
v5 = iconcat v4, v4
v6 = sshr.i128 v3, v5
v7, v8 = isplit v6
return v7, v8
}
; run: %sshr_i128_i128(0x01010101_01010101, 0x81010101_01010101, 2) == [0x40404040_40404040, 0xe0404040_40404040]
; run: %sshr_i128_i128(0x00000000_00000000, 0xffffffff_ffffffff, 32) == [0xffffffff_00000000, 0xffffffff_ffffffff]
; run: %sshr_i128_i128(0x80000000_00000000, 0xffffffff_00000000, 32) == [0x00000000_80000000, 0xffffffff_ffffffff]
; run: %sshr_i128_i128(0x12345678_9abcdef0, 0x80101010_10101010, 66) == [0xe0040404_04040404, 0xffffffff_ffffffff]
; run: %sshr_i128_i128(0x00000000_00000000, 0x00000000_00000000, 64) == [0x00000000_00000000, 0x00000000_00000000]
; run: %sshr_i128_i128(0x12345678_9abcdef0, 0x80101010_10101010, 0) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr_i128_i128(0x12345678_9abcdef0, 0x80101010_10101010, 128) == [0x12345678_9abcdef0, 0x80101010_10101010]
; run: %sshr_i128_i128(0x01010101_01010101, 0x81010101_01010101, 129) == [0x80808080_80808080, 0xc0808080_80808080]
; run: %sshr_i128_i128(0x01010101_01010101, 0x81010101_01010101, 130) == [0x40404040_40404040, 0xe0404040_40404040]

View File

@@ -1,4 +1,5 @@
test run
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
function %ctz(i64, i64) -> i8 {

View File

@@ -1,5 +1,6 @@
test run
target x86_64 machinst
target x86_64 legacy
function %reverse_bits_zero() -> b1 {
block0:

View File

@@ -1,6 +1,10 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
target x86_64 legacy
function %br_false() -> b1 {
block0:
v10 = iconst.i64 0x42

View File

@@ -0,0 +1,12 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
; A zero i128 immediate must split into two zero i64 halves.
function %i128_const_0() -> i64, i64 {
block0:
v1 = iconst.i128 0
v2, v3 = isplit v1
return v2, v3
}
; run: %i128_const_0() == [0, 0]

View File

@@ -1,7 +1,10 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
target x86_64 legacy
function u0:0() -> b1 {
function %i128_uextend() -> b1 {
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
v1 = uextend.i128 v0
@@ -13,7 +16,7 @@ block0:
}
; run
function u0:1() -> b1 {
function %i128_sextend() -> b1 {
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
v1 = sextend.i128 v0

View File

@@ -0,0 +1,60 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
; 128-bit rotate left with an i8 rotate amount; rotating by 0 or 128 is the
; identity, and rotating by 73 matches rotating by 9 (amount taken modulo 128
; on the repeating 0x01 byte pattern).
function %rotl(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = rotl.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %rotl(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x01010101_01010101, 73) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotl(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
; 128-bit rotate right with an i8 rotate amount; rotating by 0 or 128 is the
; identity, and rotating by 73 matches rotating by 9.
function %rotr(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = iconcat v0, v1
v4 = rotr.i128 v3, v2
v5, v6 = isplit v4
return v5, v6
}
; run: %rotr(0x01010101_01010101, 0x01010101_01010101, 9) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr(0x01010101_01010101, 0x01010101_01010101, 73) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotr(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
; Same rotate-left cases as %rotl, but with the rotate amount widened to an
; i128 whose halves both hold the zero-extended i8; expected results match
; the i8-amount variant.
function %rotl_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = rotl.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %rotl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x01010101_01010101, 73) == [0x02020202_02020202, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotl_amt_i128(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]
; Same rotate-right cases as %rotr, but with the rotate amount widened to an
; i128 whose halves both hold the zero-extended i8; expected results match
; the i8-amount variant.
function %rotr_amt_i128(i64, i64, i8) -> i64, i64 {
block0(v0: i64, v1: i64, v2: i8):
v3 = uextend.i64 v2
v4 = iconcat v3, v3
v5 = iconcat v0, v1
v6 = rotr.i128 v5, v4
v7, v8 = isplit v6
return v7, v8
}
; run: %rotr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 9) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr_amt_i128(0x01010101_01010101, 0x01010101_01010101, 73) == [0x80808080_80808080, 0x80808080_80808080]
; run: %rotr_amt_i128(0x01010101_01010101, 0x02020202_02020202, 0) == [0x01010101_01010101, 0x02020202_02020202]
; run: %rotr_amt_i128(0x01010101_01010101, 0x03030303_03030303, 128) == [0x01010101_01010101, 0x03030303_03030303]

View File

@@ -1,6 +1,10 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst skylake
set enable_simd
target x86_64 legacy skylake
function %iadd_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0:i32x4, v1:i32x4):

View File

@@ -1,9 +1,14 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
set opt_level=speed_and_size
set enable_simd
target x86_64 machinst skylake
set opt_level=speed_and_size
set enable_simd
target x86_64 legacy haswell
;; Test if bitselect->vselect optimization works properly
;; x86_64 legacy: Test if bitselect->vselect optimization works properly
function %mask_from_icmp(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):

View File

@@ -1,4 +1,6 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst skylake

View File

@@ -0,0 +1,44 @@
test run
set enable_simd
target x86_64 legacy
; Signed byte max: every lane of the result must be 1 (nonzero), since
; 1 > -1 under signed comparison; vall_true then yields true.
function %maxs_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 will be greater than -1 == 0xff with
; signed max
v1 = vconst.i8x16 [0xff 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
v2 = x86_pmaxs v0, v1
v8 = vall_true v2
return v8
}
; run
; Unsigned word max: lane 0 picks -1 (0xffff unsigned) over 0, so every
; result lane is nonzero and vall_true yields true.
function %maxu_i16x8() -> b1 {
block0:
v0 = vconst.i16x8 [0 1 1 1 1 1 1 1]
v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] ; -1 == 0xffff will be greater with unsigned max
v2 = x86_pmaxu v0, v1
v8 = vall_true v2
return v8
}
; run
; Signed dword min: lane 0 picks -1 over 0 under signed comparison, so every
; result lane is nonzero and vall_true yields true.
function %mins_i32x4() -> b1 {
block0:
v0 = vconst.i32x4 [0 1 1 1]
v1 = vconst.i32x4 [-1 1 1 1] ; -1 == 0xffffffff will be less with signed min
v2 = x86_pmins v0, v1
v8 = vall_true v2
return v8
}
; run
; Unsigned byte min: every lane picks 1 over 2, so every result lane is
; nonzero and vall_true yields true.
function %minu_i8x16() -> b1 {
block0:
v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 < 2 with unsigned min
v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2]
v2 = x86_pminu v0, v1
v8 = vall_true v2
return v8
}
; run

View File

@@ -1,6 +1,10 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst
set enable_simd
target x86_64 legacy
function %icmp_eq_i8x16() -> b8 {
block0:

View File

@@ -1,6 +1,10 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst
set enable_simd
target x86_64 legacy
function %fcvt_from_sint(i32x4) -> f32x4 {
block0(v0: i32x4):
@@ -15,6 +19,7 @@ block0(v0: i32x4):
v1 = fcvt_from_uint.f32x4 v0
return v1
}
; run: %fcvt_from_uint([0 0 0 0]) == [0x0.0 0x0.0 0x0.0 0x0.0]
; run: %fcvt_from_uint([0xFFFFFFFF 0 1 123456789]) == [0x100000000.0 0.0 0x1.0 0x75bcd18.0]
; Note that 0xFFFFFFFF is decimal 4,294,967,295 and is rounded up 1 to 4,294,967,296 in f32x4.
@@ -26,3 +31,11 @@ block0(v0:f32x4):
; run: %fcvt_to_sint_sat([0x0.0 -0x1.0 0x1.0 0x1.0p100]) == [0 -1 1 0x7FFFFFFF]
; run: %fcvt_to_sint_sat([-0x8.1 0x0.0 0x0.0 -0x1.0p100]) == [-8 0 0 0x80000000]
function %fcvt_to_uint_sat(f32x4) -> i32x4 {
block0(v0:f32x4):
v1 = fcvt_to_uint_sat.i32x4 v0
return v1
}
; run: %fcvt_to_uint_sat([0x1.0 0x4.2 0x4.6 0x1.0p100]) == [1 4 4 0xFFFFFFFF]
; run: %fcvt_to_uint_sat([-0x8.1 -0x0.0 0x0.0 -0x1.0p100]) == [0 0 0 0]
; run: %fcvt_to_uint_sat([0xB2D05E00.0 0.0 0.0 0.0]) == [3000000000 0 0 0]

View File

@@ -1,4 +1,6 @@
test run
; target aarch64 TODO: Not yet implemented on aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst
@@ -190,3 +192,20 @@ block0(v0: f64):
return v1
}
; run: %splat_f64(-0x1.1) == [-0x1.1 -0x1.1]
; narrow
; Signed saturating narrow i32x4 -> i16x8: 0x0001ffff saturates to 0x7fff,
; while in-range values (including negatives, e.g. -1 and -6) pass through.
function %snarrow(i32x4, i32x4) -> i16x8 {
block0(v0: i32x4, v1: i32x4):
v2 = snarrow v0, v1
return v2
}
; run: %snarrow([0 1 -1 0x0001ffff], [4 5 -6 0xffffffff]) == [0 1 -1 0x7fff 4 5 -6 0xffff]
; Unsigned saturating narrow i32x4 -> i16x8: negative inputs clamp to 0 and
; 0x0001ffff saturates to 0xffff.
function %unarrow(i32x4, i32x4) -> i16x8 {
block0(v0: i32x4, v1: i32x4):
v2 = unarrow v0, v1
return v2
}
; run: %unarrow([0 1 -1 0x0001ffff], [4 5 -6 0xffffffff]) == [0 1 0 0xffff 4 5 0 0]

View File

@@ -1,4 +1,8 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
set enable_simd
target x86_64 machinst
set enable_simd
target x86_64 legacy skylake

View File

@@ -0,0 +1,40 @@
test run
; target s390x TODO: Not yet implemented on s390x
; target aarch64 TODO: Not yet implemented on aarch64
set enable_simd
target x86_64 machinst
set enable_simd
target x86_64 legacy
set enable_simd
target x86_64 legacy skylake
; An all-zero i8x16 vconst: extracting lane 4 must compare equal to 0.
function %vconst_zeroes() -> b1 {
block0:
v0 = vconst.i8x16 0x00
v1 = extractlane v0, 4
v2 = icmp_imm eq v1, 0
return v2
}
; run
; An all-ones i8x16 vconst: extracting lane 2 must compare equal to 0xff.
function %vconst_ones() -> b1 {
block0:
v0 = vconst.i8x16 0xffffffffffffffffffffffffffffffff
v1 = extractlane v0, 2
v2 = icmp_imm eq v1, 0xff
return v2
}
; run
; Splatting the scalar -1 into i64x2 must equal the vconst [-1 -1] in every
; lane (checked with icmp eq + vall_true).
function %splat_i64x2() -> b1 {
block0:
v0 = iconst.i64 -1
v1 = splat.i64x2 v0
v2 = vconst.i64x2 [-1 -1]
v3 = icmp eq v1, v2
v8 = vall_true v3
return v8
}
; run

View File

@@ -1,4 +1,8 @@
test run
; target s390x TODO: Not yet implemented on s390x
target aarch64
set enable_simd
target x86_64 machinst
set enable_simd
target x86_64 legacy haswell

View File

@@ -1,6 +1,8 @@
test run
target x86_64
feature "experimental_x64"
target s390x
target aarch64
target x86_64 machinst
target x86_64 legacy
function %f(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) -> i64 {
block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32, v9: i32, v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32):