x64: Add shuffle specialization for palignr (#5999)

* x64: Add `shuffle` specialization for `palignr`

This commit adds a specialization for the `palignr` instruction to the
x64 backend, covering more patterns of byte shuffles (the mask pattern
involved is sketched below).

* Fix tests
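
The lowering rule itself lives in the backend's ISLE rules and is not part of
the test diffs below. As a rough illustration only, here is a minimal Rust
sketch of the mask check the specialization relies on; `palignr_imm_from_mask`
is a hypothetical helper name, not the backend's actual API. The idea: a
16-byte shuffle whose lanes form the consecutive run [n, n+1, ..., n+15] with
n <= 16 reads a contiguous 16-byte window starting at byte n of the 32-byte
concatenation of the two inputs, which is exactly what `palignr $n` computes.

// Minimal sketch, not the backend's actual implementation: return the
// `palignr` immediate for a 16-byte shuffle mask, if one exists.
fn palignr_imm_from_mask(mask: &[u8; 16]) -> Option<u8> {
    let n = mask[0];
    // The 16-byte window must fit inside the 32-byte concatenation of the
    // two shuffle inputs, so the starting byte can be at most 16.
    if n > 16 {
        return None;
    }
    // Every lane must continue the consecutive run n, n+1, ..., n+15.
    if mask.iter().enumerate().all(|(i, &m)| m == n + i as u8) {
        Some(n)
    } else {
        None
    }
}

fn main() {
    // The mask from the `%palignr_5` test below maps to `palignr $5`,
    // i.e. bytes 5..=20 of the concatenation of the two inputs.
    let mask: [u8; 16] = core::array::from_fn(|i| 5 + i as u8);
    assert_eq!(palignr_imm_from_mask(&mask), Some(5));

    // The mask from `%not_single_pshufd` is not consecutive, so it does
    // not qualify for this specialization.
    let other = [8, 9, 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 20, 21, 22, 23];
    assert_eq!(palignr_imm_from_mask(&other), None);
}

In the SSE lowering shown in the tests, the two `movdqa` moves put the second
shuffle input in the destination register because `palignr` treats its
destination as the high 16 bytes of the concatenation and its source as the
low 16 bytes; the three-operand AVX `vpalignr` form needs no extra moves.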
Alex Crichton authored on 2023-03-13 16:01:24 -05:00; committed by GitHub
parent bba49646c3
commit e2a6fe99c2
6 changed files with 218 additions and 18 deletions

@@ -196,7 +196,7 @@ function %not_single_pshufd(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bitcast.i8x16 little v0
v3 = bitcast.i8x16 little v1
v4 = shuffle v2, v3, [8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]
v4 = shuffle v2, v3, [8 9 10 11 12 13 14 15 20 21 22 23 20 21 22 23]
v5 = bitcast.i32x4 little v4
return v5
}
@@ -205,7 +205,7 @@ block0(v0: i32x4, v1: i32x4):
; pushq %rbp
; movq %rsp, %rbp
; block0:
; shufps $78, %xmm0, %xmm1, %xmm0
; shufps $94, %xmm0, %xmm1, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -215,7 +215,7 @@ block0(v0: i32x4, v1: i32x4):
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; shufps $0x4e, %xmm1, %xmm0
; shufps $0x5e, %xmm1, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq
@@ -644,3 +644,148 @@ block0(v0: i8x16, v1: i8x16):
; popq %rbp
; retq
function %palignr_0(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $0, %xmm0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq
function %palignr_1(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $1, %xmm0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $1, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq
function %palignr_5(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $5, %xmm0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $5, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq
function %palignr_11(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $11, %xmm0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $0xb, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq
function %palignr_16(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $16, %xmm0, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movdqa %xmm0, %xmm4
; movdqa %xmm1, %xmm0
; palignr $0x10, %xmm4, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq

@@ -1948,3 +1948,28 @@ block0(v0: i32x4):
; popq %rbp
; retq
function %palignr_11(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = shuffle v0, v1, [11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26]
return v2
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; vpalignr $11, %xmm1, %xmm0, %xmm0
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; vpalignr $0xb, %xmm0, %xmm1, %xmm0
; movq %rbp, %rsp
; popq %rbp
; retq

@@ -73,7 +73,7 @@ block0(v0: i32x4, v1: i32x4):
v5 = bitcast.i32x4 little v4
return v5
}
; run: %pshufd_0022([1 2 3 4], [5 6 7 8]) == [4 2 3 1]
; run: %pshufd_3120([1 2 3 4], [5 6 7 8]) == [4 2 3 1]
function %pshufd_7546(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
@@ -83,7 +83,7 @@ block0(v0: i32x4, v1: i32x4):
v5 = bitcast.i32x4 little v4
return v5
}
; run: %pshufd_0022([1 2 3 4], [5 6 7 8]) == [8 6 5 7]
; run: %pshufd_7546([1 2 3 4], [5 6 7 8]) == [8 6 5 7]
function %not_pshufd(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
@@ -93,7 +93,17 @@ block0(v0: i32x4, v1: i32x4):
v5 = bitcast.i32x4 little v4
return v5
}
; run: %pshufd_0022([1 2 3 4], [5 6 7 8]) == [3 4 5 6]
; run: %not_pshufd([1 2 3 4], [5 6 7 8]) == [3 4 5 6]
function %not_pshufd2(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bitcast.i8x16 little v0
v3 = bitcast.i8x16 little v1
v4 = shuffle v2, v3, [8 9 10 11 12 13 14 15 20 21 22 23 20 21 22 23]
v5 = bitcast.i32x4 little v4
return v5
}
; run: %not_pshufd2([1 2 3 4], [5 6 7 8]) == [3 4 6 6]
function %punpckldq(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):