This commit goes through the `runtests` folder of the `filetests` test suite and ensures that every test which uses SIMD or float-related instructions on x64 is executed with the baseline x86_64 support in addition to a run with AVX enabled. Most of the instructions used have AVX equivalents, so this should help test all of those equivalents alongside the codegen filetests in the x64 folder.
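A quick way to exercise one of the updated files locally is to point `clif-util` at it (a sketch, assuming the usual Cranelift workflow; the path below is illustrative rather than taken from this commit). It compiles and executes the `; run:` directives for each `target` line that is compatible with the host, which now includes both the baseline and `has_avx` x86_64 configurations:

cargo run --bin clif-util -- test cranelift/filetests/filetests/runtests/simd-icmp-sge.clif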
test interpret
test run
set enable_simd
target x86_64
target x86_64 has_avx
target aarch64
target s390x
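
; The two x86_64 target lines above reflect this change: the same run
; directives are exercised once with the baseline x86_64 lowering and once
; with AVX enabled (`has_avx`), in addition to the interpreter, aarch64,
; and s390x.
;
; For vector `icmp`, each lane of the result is all ones (-1) when the
; comparison holds for that lane and 0 when it does not, which is why the
; expected vectors below consist of -1 and 0 values.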

function %simd_icmp_sge_i8(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
    v2 = icmp sge v0, v1
    return v2
}
; run: %simd_icmp_sge_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 1 0 0 0 0 0 0 0 0 0 0]) == [-1 -1 -1 0 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]

function %simd_icmp_sge_i16(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
    v2 = icmp sge v0, v1
    return v2
}
; run: %simd_icmp_sge_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 1 0 0]) == [-1 -1 -1 0 0 -1 -1 -1]

function %simd_icmp_sge_i32(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
    v2 = icmp sge v0, v1
    return v2
}
; run: %simd_icmp_sge_i32([0 1 -1 0], [0 0 -1 1]) == [-1 -1 -1 0]
; run: %simd_icmp_sge_i32([-5 1 0 0], [-1 1 0 0]) == [0 -1 -1 -1]

function %simd_icmp_sge_i64(i64x2, i64x2) -> i64x2 {
block0(v0: i64x2, v1: i64x2):
    v2 = icmp sge v0, v1
    return v2
}
; run: %simd_icmp_sge_i64([0 1], [0 0]) == [-1 -1]
; run: %simd_icmp_sge_i64([-1 0], [-1 1]) == [-1 0]
; run: %simd_icmp_sge_i64([-5 1], [-1 1]) == [0 -1]
; run: %simd_icmp_sge_i64([0 0], [0 0]) == [-1 -1]
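
; The last case folds the comparison down to a scalar: `vconst` materializes
; the two input vectors, and `vall_true` returns 1 only if every lane of the
; comparison result is non-zero, i.e. the comparison held in all lanes.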
function %icmp_sge_const_i16x8() -> i8 {
block0:
    v0 = vconst.i16x8 [-1 1 2 3 4 5 6 7]
    v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1]
    v2 = icmp sge v0, v1
    v8 = vall_true v2
    return v8
}
; run: %icmp_sge_const_i16x8() == 1