Remove '%test_' prefix from SIMD filetests

Andrew Brown
2020-03-06 10:21:06 -08:00
parent d19f727850
commit 4a0f53464a
12 changed files with 41 additions and 45 deletions

View File

@@ -2,7 +2,7 @@ test binemit
target x86_64
; Ensure raw_bitcast emits no instructions.
-function %test_raw_bitcast_i16x8_to_b32x4() {
+function %raw_bitcast_i16x8_to_b32x4() {
block0:
[-, %rbx] v0 = bconst.b16 true
[-, %xmm2] v1 = scalar_to_vector.b16x8 v0

View File

@@ -5,28 +5,28 @@ target x86_64
;; These scalar_to_vector tests avoid the use of REX prefixes with the speed_and_size optimization flag.
-function %test_scalar_to_vector_b8() {
+function %scalar_to_vector_b8() {
block0:
[-, %rax] v0 = bconst.b8 true
[-, %xmm0] v1 = scalar_to_vector.b8x16 v0 ; bin: 66 0f 6e c0
return
}
-function %test_scalar_to_vector_i16() {
+function %scalar_to_vector_i16() {
block0:
[-, %rbx] v0 = iconst.i16 42
[-, %xmm2] v1 = scalar_to_vector.i16x8 v0 ; bin: 66 0f 6e d3
return
}
-function %test_scalar_to_vector_b32() {
+function %scalar_to_vector_b32() {
block0:
[-, %rcx] v0 = bconst.b32 false
[-, %xmm3] v1 = scalar_to_vector.b32x4 v0 ; bin: 66 0f 6e d9
return
}
-function %test_scalar_to_vector_i64() {
+function %scalar_to_vector_i64() {
block0:
[-, %rdx] v0 = iconst.i64 42
[-, %xmm7] v1 = scalar_to_vector.i64x2 v0 ; bin: 66 48 0f 6e fa

View File

@@ -5,7 +5,7 @@ target x86_64 haswell
; for insertlane, floats are legalized differently than integers and booleans; integers and
; booleans use x86_pinsr which is manually placed in the IR so that it can be binemit-tested
-function %test_insertlane_b8() {
+function %insertlane_b8() {
block0:
[-, %rax] v0 = bconst.b8 true
[-, %rbx] v1 = bconst.b8 false
@@ -14,7 +14,7 @@ block0:
return
}
-function %test_insertlane_i16() {
+function %insertlane_i16() {
block0:
[-, %rax] v0 = iconst.i16 4
[-, %rbx] v1 = iconst.i16 5
@@ -23,7 +23,7 @@ block0:
return
}
-function %test_insertlane_i32() {
+function %insertlane_i32() {
block0:
[-, %rax] v0 = iconst.i32 42
[-, %rbx] v1 = iconst.i32 99
@@ -32,7 +32,7 @@ block0:
return
}
-function %test_insertlane_b64() {
+function %insertlane_b64() {
block0:
[-, %rax] v0 = bconst.b64 true
[-, %rbx] v1 = bconst.b64 false
@@ -44,7 +44,7 @@ block0:
; for extractlane, floats are legalized differently than integers and booleans; integers and
; booleans use x86_pextr which is manually placed in the IR so that it can be binemit-tested
-function %test_extractlane_b8() {
+function %extractlane_b8() {
block0:
[-, %rax] v0 = bconst.b8 true
[-, %xmm0] v1 = splat.b8x16 v0
@@ -52,7 +52,7 @@ block0:
return
}
-function %test_extractlane_i16() {
+function %extractlane_i16() {
block0:
[-, %rax] v0 = iconst.i16 4
[-, %xmm1] v1 = splat.i16x8 v0
@@ -60,7 +60,7 @@ block0:
return
}
-function %test_extractlane_i32() {
+function %extractlane_i32() {
block0:
[-, %rax] v0 = iconst.i32 42
[-, %xmm4] v1 = splat.i32x4 v0
@@ -68,7 +68,7 @@ block0:
return
}
-function %test_extractlane_b64() {
+function %extractlane_b64() {
block0:
[-, %rax] v0 = bconst.b64 false
[-, %xmm2] v1 = splat.b64x2 v0
@@ -78,7 +78,7 @@ block0:
;; shuffle
-function %test_pshufd() {
+function %pshufd() {
block0:
[-, %rax] v0 = iconst.i32 42
[-, %xmm0] v1 = scalar_to_vector.i32x4 v0 ; bin: 66 40 0f 6e c0
@@ -86,7 +86,7 @@ block0:
return
}
-function %test_pshufb() {
+function %pshufb() {
block0:
[-, %rax] v0 = iconst.i8 42
[-, %xmm0] v1 = scalar_to_vector.i8x16 v0 ; bin: 66 40 0f 6e c0

View File

@@ -5,7 +5,7 @@ set enable_simd
target x86_64
; Ensure that scalar_to_vector emits no instructions for floats (already exist in an XMM register)
-function %test_scalar_to_vector_f32() -> f32x4 baldrdash_system_v {
+function %scalar_to_vector_f32() -> f32x4 baldrdash_system_v {
block0:
v0 = f32const 0x0.42
v1 = scalar_to_vector.f32x4 v0

View File

@@ -4,7 +4,7 @@ target x86_64 skylake
;; shuffle
-function %test_shuffle_different_ssa_values() -> i8x16 {
+function %shuffle_different_ssa_values() -> i8x16 {
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 0x01
@@ -18,7 +18,7 @@ block0:
; nextln: v6 = x86_pshufb v1, v5
; nextln: v2 = bor v4, v6
-function %test_shuffle_same_ssa_value() -> i8x16 {
+function %shuffle_same_ssa_value() -> i8x16 {
block0:
v1 = vconst.i8x16 0x01
v2 = shuffle v1, v1, 0x13000000000000000000000000000000 ; pick the fourth lane of v1 and the rest from the first lane of v1
@@ -30,7 +30,7 @@ block0:
;; splat
-function %test_splat_i32() -> i32x4 {
+function %splat_i32() -> i32x4 {
block0:
v0 = iconst.i32 42
v1 = splat.i32x4 v0
@@ -43,7 +43,7 @@ block0:
; nextln: return v1
; nextln: }
-function %test_splat_i64() -> i64x2 {
+function %splat_i64() -> i64x2 {
block0:
v0 = iconst.i64 42
v1 = splat.i64x2 v0
@@ -55,7 +55,7 @@ block0:
; nextln: v1 = x86_pinsr v2, 1, v0
; nextln: return v1
-function %test_splat_b16() -> b16x8 {
+function %splat_b16() -> b16x8 {
block0:
v0 = bconst.b16 true
v1 = splat.b16x8 v0
@@ -70,7 +70,7 @@ block0:
; nextln: v1 = raw_bitcast.b16x8 v5
; nextln: return v1
-function %test_splat_i8() -> i8x16 {
+function %splat_i8() -> i8x16 {
block0:
v0 = iconst.i8 42
v1 = splat.i8x16 v0

View File

@@ -1,7 +1,7 @@
test run
set enable_simd
-function %test_shuffle_different_ssa_values() -> b1 {
+function %shuffle_different_ssa_values() -> b1 {
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42]
@@ -13,7 +13,7 @@ block0:
}
; run
-function %test_shuffle_same_ssa_value() -> b1 {
+function %shuffle_same_ssa_value() -> b1 {
block0:
v0 = vconst.i8x16 0x01000000_00000000_00000000_00000000 ; note where lane 15 is when written with hexadecimal syntax
v1 = shuffle v0, v0, 0x0f0f0f0f_0f0f0f0f_0f0f0f0f_0f0f0f0f ; use the last lane of v0 to fill all lanes
@@ -56,7 +56,7 @@ block0:
; TODO once SIMD vector comparison is implemented, remove use of extractlane below
-function %test_insertlane_b8() -> b8 {
+function %insertlane_b8() -> b8 {
block0:
v1 = bconst.b8 true
v2 = vconst.b8x16 [false false false false false false false false false false false false false
@@ -67,7 +67,7 @@ block0:
}
; run
-function %test_insertlane_f32() -> b1 {
+function %insertlane_f32() -> b1 {
block0:
v0 = f32const 0x42.42
v1 = vconst.f32x4 0x00
@@ -78,7 +78,7 @@ block0:
}
; run
-function %test_insertlane_f64_lane1() -> b1 {
+function %insertlane_f64_lane1() -> b1 {
block0:
v0 = f64const 0x42.42
v1 = vconst.f64x2 0x00
@@ -89,7 +89,7 @@ block0:
}
; run
-function %test_insertlane_f64_lane0() -> b1 {
+function %insertlane_f64_lane0() -> b1 {
block0:
v0 = f64const 0x42.42
v1 = vconst.f64x2 0x00
@@ -100,7 +100,7 @@ block0:
}
; run
-function %test_extractlane_b8() -> b8 {
+function %extractlane_b8() -> b8 {
block0:
v1 = vconst.b8x16 [false false false false false false false false false false true false false
false false false]
@@ -109,7 +109,7 @@ block0:
}
; run
-function %test_extractlane_i16() -> b1 {
+function %extractlane_i16() -> b1 {
block0:
v0 = vconst.i16x8 0x00080007000600050004000300020001
v1 = extractlane v0, 1
@@ -118,7 +118,7 @@ block0:
}
; run
-function %test_extractlane_f32() -> b1 {
+function %extractlane_f32() -> b1 {
block0:
v0 = f32const 0x42.42
v1 = vconst.f32x4 [0x00.00 0x00.00 0x00.00 0x42.42]
@@ -128,7 +128,7 @@ block0:
}
; run
-function %test_extractlane_i32_with_vector_reuse() -> b1 {
+function %extractlane_i32_with_vector_reuse() -> b1 {
block0:
v0 = iconst.i32 42
v1 = iconst.i32 99
@@ -147,7 +147,7 @@ block0:
}
; run
-function %test_extractlane_f32_with_vector_reuse() -> b1 {
+function %extractlane_f32_with_vector_reuse() -> b1 {
block0:
v0 = f32const 0x42.42
v1 = f32const 0x99.99

View File

@@ -3,7 +3,7 @@ set opt_level=speed_and_size
set enable_simd
target x86_64
-function %test_vconst_b8() {
+function %vconst_b8() {
block0:
[-, %xmm2] v0 = vconst.b8x16 0x01 ; bin: 0f 10 15 00000008 PCRelRodata4(15)
[-, %xmm3] v1 = vconst.b8x16 0x02 ; bin: 0f 10 1d 00000011 PCRelRodata4(31)

View File

@@ -4,7 +4,7 @@ set enable_probestack=false
target x86_64 haswell
; use baldrdash calling convention here for simplicity (avoids prologue, epilogue)
-function %test_vconst_i32() -> i32x4 baldrdash_system_v {
+function %vconst_i32() -> i32x4 baldrdash_system_v {
block0:
v0 = vconst.i32x4 0x1234
return v0

View File

@@ -2,9 +2,7 @@ test binemit
set enable_simd
target x86_64
; TODO move to vconst-compile.clif or vconst-binemit.clif
-function %test_vconst_optimizations() {
+function %vconst_optimizations() {
block0:
[-, %xmm4] v0 = vconst.b8x16 0x00 ; bin: 66 0f ef e4
[-, %xmm7] v1 = vconst.b8x16 0xffffffffffffffffffffffffffffffff ; bin: 66 0f 74 ff

View File

@@ -2,9 +2,7 @@ test run
set enable_simd
target x86_64
; TODO move to vconst-run.clif
-function %test_vconst_zeroes() -> b1 {
+function %vconst_zeroes() -> b1 {
block0:
v0 = vconst.i8x16 0x00
v1 = extractlane v0, 4
@@ -13,7 +11,7 @@ block0:
}
; run
-function %test_vconst_ones() -> b1 {
+function %vconst_ones() -> b1 {
block0:
v0 = vconst.i8x16 0xffffffffffffffffffffffffffffffff
v1 = extractlane v0, 2

View File

@@ -2,7 +2,7 @@ test rodata
set enable_simd=true
target x86_64 haswell
-function %test_vconst_i32() -> i32x4 {
+function %vconst_i32() -> i32x4 {
block0:
v0 = vconst.i32x4 0x1234
return v0
@@ -10,7 +10,7 @@ block0:
; sameln: [34, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-function %test_vconst_b16() -> b16x8 {
+function %vconst_b16() -> b16x8 {
block0:
v0 = vconst.b16x8 [true false true false true false true true]
return v0

View File

@@ -1,7 +1,7 @@
test run
set enable_simd
-function %test_vconst_syntax() -> b1 {
+function %vconst_syntax() -> b1 {
block0:
v0 = vconst.i32x4 0x00000004_00000003_00000002_00000001 ; build constant using hexadecimal syntax
v1 = vconst.i32x4 [1 2 3 4] ; build constant using literal list syntax