Add x86 SIMD sshr and ushr

Only the shifts with applicable SSE2 instructions are implemented here: PSRL* (for ushr) only has 16-, 32-, and 64-bit forms, and PSRA* (for sshr) only has 16- and 32-bit forms.
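
For reference, a minimal Rust sketch of that coverage (the mnemonic/width table is standard SSE2, not something introduced by this commit):

```rust
fn main() {
    // SSE2 variable-count vector shifts by lane width. The gaps are why this
    // commit adds no 8-bit shifts and no 64-bit sshr: SSE2 has no
    // PSRLB/PSRAB and no PSRAQ.
    let ushr_insts = [("PSRLW", 16), ("PSRLD", 32), ("PSRLQ", 64)];
    let sshr_insts = [("PSRAW", 16), ("PSRAD", 32)];
    for (mnemonic, lane_bits) in ushr_insts {
        println!("ushr on i{lane_bits} lanes -> {mnemonic}");
    }
    for (mnemonic, lane_bits) in sshr_insts {
        println!("sshr on i{lane_bits} lanes -> {mnemonic}");
    }
}
```
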
Andrew Brown
2019-10-07 10:38:35 -07:00
parent 808885ce56
commit f1904bffea
7 changed files with 197 additions and 2 deletions

@@ -19,3 +19,33 @@ ebb0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]):
[-, %xmm6] v2 = x86_psll v0, v1 ; bin: 66 0f f3 f3
return v2
}
function %ushr_i16x8(i16x8, i64x2) -> i16x8 {
ebb0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]):
[-, %xmm2] v2 = x86_psrl v0, v1 ; bin: 66 0f d1 d1
return v2
}
function %ushr_i32x4(i32x4, i64x2) -> i32x4 {
ebb0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]):
[-, %xmm4] v2 = x86_psrl v0, v1 ; bin: 66 0f d2 e0
return v2
}
function %ushr_i64x2(i64x2, i64x2) -> i64x2 {
ebb0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]):
[-, %xmm6] v2 = x86_psrl v0, v1 ; bin: 66 0f d3 f3
return v2
}
function %sshr_i16x8(i16x8, i64x2) -> i16x8 {
ebb0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]):
[-, %xmm2] v2 = x86_psra v0, v1 ; bin: 66 0f e1 d1
return v2
}
function %sshr_i32x4(i32x4, i64x2) -> i32x4 {
ebb0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]):
[-, %xmm4] v2 = x86_psra v0, v1 ; bin: 66 0f e2 e0
return v2
}
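
The trailing byte in each `bin:` comment above is the ModRM byte of the register-to-register encoding. A small sketch of how it is formed (an illustrative helper, not Cranelift's actual encoder):

```rust
// mod=0b11 selects register-direct operands; reg carries the destination
// XMM register number and rm the source.
fn modrm_rr(dst_xmm: u8, src_xmm: u8) -> u8 {
    0b1100_0000 | (dst_xmm << 3) | src_xmm
}

fn main() {
    assert_eq!(modrm_rr(2, 1), 0xd1); // 66 0f d1 d1: psrlw %xmm1, %xmm2
    assert_eq!(modrm_rr(4, 0), 0xe0); // 66 0f d2 e0: psrld %xmm0, %xmm4
    assert_eq!(modrm_rr(6, 3), 0xf3); // 66 0f d3 f3: psrlq %xmm3, %xmm6
}
```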

@@ -11,3 +11,23 @@ ebb0:
; nextln: v2 = x86_psll v1, v3
return v2
}
function %ushr_i64x2() -> i64x2 {
ebb0:
v0 = iconst.i32 1
v1 = vconst.i64x2 [1 2]
v2 = ushr v1, v0
; check: v3 = bitcast.i64x2 v0
; nextln: v2 = x86_psrl v1, v3
return v2
}
function %sshr_i16x8() -> i16x8 {
ebb0:
v0 = iconst.i32 1
v1 = vconst.i16x8 [1 2 4 8 16 32 64 128]
v2 = sshr v1, v0
; check: v3 = bitcast.i64x2 v0
; nextln: v2 = x86_psra v1, v3
return v2
}
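
What the legalized sequence computes: the scalar shift amount is bitcast into the low lane of a 128-bit value, and the x86 shift then applies that single count (the low 64 bits of the second operand) to every lane. A reference model in Rust of the `x86_psra` case, illustrating the assumed semantics rather than Cranelift's implementation:

```rust
// Model of x86_psra on i16x8 lanes: every lane shifts right arithmetically
// by one shared count; a count of 16 or more behaves like a shift by 15,
// filling the lane with copies of its sign bit.
fn psraw(lanes: [i16; 8], count: u64) -> [i16; 8] {
    let c = count.min(15) as u32;
    lanes.map(|lane| lane >> c)
}

fn main() {
    // sshr of the vconst above by 1.
    assert_eq!(
        psraw([1, 2, 4, 8, 16, 32, 64, 128], 1),
        [0, 1, 2, 4, 8, 16, 32, 64]
    );
}
```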

@@ -37,3 +37,71 @@ ebb0:
return v7
}
; run
function %ushr_i64x2() -> b1 {
ebb0:
v0 = iconst.i32 1
v1 = vconst.i64x2 [1 2]
v2 = ushr v1, v0
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0
v5 = extractlane v2, 1
v6 = icmp_imm eq v5, 1
v7 = band v4, v6
return v7
}
; run
function %ushr_too_large_i32x4() -> b1 {
ebb0:
v0 = iconst.i32 33 ; note that this will shift off the end of each lane
v1 = vconst.i32x4 [1 2 4 8]
v2 = ushr v1, v0
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0
v5 = extractlane v2, 3
v6 = icmp_imm eq v5, 0
v7 = band v4, v6
return v7
}
; run
function %sshr_i16x8() -> b1 {
ebb0:
v0 = iconst.i32 1
v1 = vconst.i16x8 [-1 2 4 8 -16 32 64 128]
v2 = sshr v1, v0
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0xffff ; because of the shifted-in sign-bit, this remains 0xffff == -1
v5 = extractlane v2, 4
v6 = icmp_imm eq v5, 0xfff8 ; -16 has been shifted to -8 == 0xfff8
v7 = band v4, v6
return v7
}
; run
function %sshr_too_large_i32x4() -> b1 {
ebb0:
v0 = iconst.i32 33 ; note that this will shift off the end of each lane
v1 = vconst.i32x4 [1 2 4 -8]
v2 = sshr v1, v0
v3 = extractlane v2, 0
v4 = icmp_imm eq v3, 0
v5 = extractlane v2, 3
v6 = icmp_imm eq v5, 0xffff_ffff ; shifting in the sign-bit repeatedly fills the result with 1s
v7 = band v4, v6
return v7
}
; run
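
The two `too_large` tests rely on x86 behavior for counts at or beyond the lane width: PSRL* zeroes each lane, while PSRA* fills each lane with its sign bit. A lane-wise model in Rust, assuming those documented SSE2 semantics:

```rust
// Logical shift: a count >= 32 shifts every bit out, leaving zero.
fn psrld_lane(lane: u32, count: u64) -> u32 {
    if count < 32 { lane >> count } else { 0 }
}

// Arithmetic shift: a count >= 32 acts like 31, a sign-bit fill.
fn psrad_lane(lane: i32, count: u64) -> i32 {
    let c = count.min(31) as u32;
    lane >> c
}

fn main() {
    assert_eq!(psrld_lane(1, 33), 0);   // %ushr_too_large_i32x4, lane 0
    assert_eq!(psrad_lane(1, 33), 0);   // %sshr_too_large_i32x4, lane 0
    assert_eq!(psrad_lane(-8, 33), -1); // lane 3: 0xffff_ffff
}
```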