aarch64: Specialize constant vector shifts (#5976)

* aarch64: Specialize constant vector shifts

This commit adds special lowering rules for vector shifts by constant
amounts so that they use dedicated instructions, which considerably
shrinks the generated code when the shift amount is constant (an
illustrative sketch of the difference follows the change list below).

* Fix codegen for right shifts by zero

* Special-case zero left-shifts as well

* Remove left-shift special case
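
For illustration only (not part of the diff): with a generic lowering, a
vector shift such as an ishl of an i8x16 by an amount held in a register is
roughly lowered by masking the amount, broadcasting it into a vector
register, and applying the register-based shift, along the lines of

    dup  v1.16b, w2               // broadcast the (masked) shift amount
    sshl v0.16b, v0.16b, v1.16b   // per-lane variable shift

whereas when the amount is a known constant, the NEON immediate-shift forms
can be selected directly, e.g.

    shl  v0.16b, v0.16b, #2       // left shift by immediate
    sshr v0.16b, v0.16b, #2       // arithmetic right shift by immediate
    ushr v0.16b, v0.16b, #2       // logical right shift by immediate

The exact instruction sequences, register choices, and shift-amount masking
shown above are assumptions made for illustration; the lowering rules added
in this commit may differ in detail.
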
Alex Crichton, 2023-03-13 17:37:59 -05:00, committed by GitHub
parent 90c9bec225
commit d6ce632b5b
7 changed files with 554 additions and 45 deletions

@@ -195,3 +195,108 @@ block0(v0: i32x4):
return v1
}
; run: %iabs([-42 -1 0 1]) == [42 1 0 1]

function %i8x16_shl_imm(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i32 2
v2 = ishl v0, v1
return v2
}
; run: %i8x16_shl_imm([0x01 0x02 0x04 0x08 0x10 0x20 0x40 0x80 0 0 0 0 0 0 0 0]) == [0x04 0x08 0x10 0x20 0x40 0x80 0 0 0 0 0 0 0 0 0 0]

function %i16x8_shl_imm(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i32 4
v2 = ishl v0, v1
return v2
}
; run: %i16x8_shl_imm([0x0001 0x0002 0x0004 0x0008 0x0010 0x0020 0x0040 0x0080]) == [0x0010 0x0020 0x0040 0x0080 0x0100 0x0200 0x0400 0x0800]
; run: %i16x8_shl_imm([0x0100 0x0200 0x0400 0x0800 0x1000 0x2000 0x4000 0x8000]) == [0x1000 0x2000 0x4000 0x8000 0 0 0 0]

function %i32x4_shl_imm(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 4
v2 = ishl v0, v1
return v2
}
; run: %i32x4_shl_imm([0x00000001 0x00000002 0x00000004 0x00000008]) == [0x00000010 0x00000020 0x00000040 0x00000080]
; run: %i32x4_shl_imm([0x10000000 0x00010000 0xf0000000 0x02000000]) == [0 0x00100000 0 0x20000000]

function %i64x2_shl_imm(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i32 32
v2 = ishl v0, v1
return v2
}
; run: %i64x2_shl_imm([0x1 0xf]) == [0x100000000 0xf00000000]
; run: %i64x2_shl_imm([0x100000000 0]) == [0 0]

function %i8x16_sshr_imm(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i32 2
v2 = sshr v0, v1
return v2
}
; run: %i8x16_sshr_imm([0x01 0x02 0x04 0x08 0x10 0x20 0x40 0x80 0 0 0 0 0 0 0 0]) == [0 0 0x01 0x02 0x04 0x08 0x10 0xe0 0 0 0 0 0 0 0 0]

function %i16x8_sshr_imm(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i32 4
v2 = sshr v0, v1
return v2
}
; run: %i16x8_sshr_imm([0x0001 0x0002 0x0004 0x0008 0x0010 0x0020 0x0040 0x0080]) == [0 0 0 0 0x1 0x2 0x4 0x8]
; run: %i16x8_sshr_imm([-1 -2 -4 -8 -16 16 0x8000 0x80f3]) == [-1 -1 -1 -1 -1 1 0xf800 0xf80f]

function %i32x4_sshr_imm(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 4
v2 = sshr v0, v1
return v2
}
; run: %i32x4_sshr_imm([1 0xfc 0x80000000 0xf83f3000]) == [0 0xf 0xf8000000 0xff83f300]

function %i64x2_sshr_imm(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i32 32
v2 = sshr v0, v1
return v2
}
; run: %i64x2_sshr_imm([0x1 0xf]) == [0 0]
; run: %i64x2_sshr_imm([0x100000000 0]) == [1 0]
; run: %i64x2_sshr_imm([-1 -1]) == [-1 -1]

function %i8x16_ushr_imm(i8x16) -> i8x16 {
block0(v0: i8x16):
v1 = iconst.i32 2
v2 = ushr v0, v1
return v2
}
; run: %i8x16_ushr_imm([0x01 0x02 0x04 0x08 0x10 0x20 0x40 0x80 0 0 0 0 0 0 0 0]) == [0 0 0x01 0x02 0x04 0x08 0x10 0x20 0 0 0 0 0 0 0 0]

function %i16x8_ushr_imm(i16x8) -> i16x8 {
block0(v0: i16x8):
v1 = iconst.i32 4
v2 = ushr v0, v1
return v2
}
; run: %i16x8_ushr_imm([0x0001 0x0002 0x0004 0x0008 0x0010 0x0020 0x0040 0x0080]) == [0 0 0 0 0x1 0x2 0x4 0x8]
; run: %i16x8_ushr_imm([-1 -2 -4 -8 -16 16 0x8000 0x80f3]) == [0x0fff 0x0fff 0x0fff 0x0fff 0x0fff 1 0x0800 0x080f]

function %i32x4_ushr_imm(i32x4) -> i32x4 {
block0(v0: i32x4):
v1 = iconst.i32 4
v2 = ushr v0, v1
return v2
}
; run: %i32x4_ushr_imm([1 0xfc 0x80000000 0xf83f3000]) == [0 0xf 0x08000000 0x0f83f300]

function %i64x2_ushr_imm(i64x2) -> i64x2 {
block0(v0: i64x2):
v1 = iconst.i32 32
v2 = ushr v0, v1
return v2
}
; run: %i64x2_ushr_imm([0x1 0xf]) == [0 0]
; run: %i64x2_ushr_imm([0x100000000 0]) == [1 0]
; run: %i64x2_ushr_imm([-1 -1]) == [0xffffffff 0xffffffff]