AArch64: Implement SIMD conversions

Copyright (c) 2020, Arm Limited.
Author: Anton Kirilov
Date:   2020-08-19 20:46:08 +01:00
Parent: 6513e90914
Commit: b895ac0e40
6 changed files with 424 additions and 202 deletions

@@ -671,6 +671,15 @@ impl VectorSize {
VectorSize::Size64x2 => unreachable!(),
}
}
pub fn halve(&self) -> VectorSize {
match self {
VectorSize::Size8x16 => VectorSize::Size8x8,
VectorSize::Size16x8 => VectorSize::Size16x4,
VectorSize::Size32x4 => VectorSize::Size32x2,
_ => *self,
}
}
}
//=============================================================================

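The new halve helper keeps the lane type of a 128-bit arrangement but halves its lane count; the pretty-printing code later in this commit uses it to derive the 64-bit destination arrangement of the low-half narrowing instructions. A minimal sketch of the expected behaviour, assuming the VectorSize type shown above is in scope (not part of the diff):

#[test]
fn halve_picks_the_64_bit_arrangement() {
    // 128-bit arrangements map to their 64-bit counterparts...
    assert_eq!(VectorSize::Size8x16.halve(), VectorSize::Size8x8);
    assert_eq!(VectorSize::Size32x4.halve(), VectorSize::Size32x2);
    // ...while everything else (including Size64x2) is returned unchanged.
    assert_eq!(VectorSize::Size64x2.halve(), VectorSize::Size64x2);
    assert_eq!(VectorSize::Size16x4.halve(), VectorSize::Size16x4);
}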
@@ -1400,6 +1400,22 @@ impl MachInstEmit for Inst {
debug_assert!(!size.is_128bits());
(0b1, 0b10011, enc_size)
}
VecMisc2::Fcvtzs => {
debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
(0b0, 0b11011, enc_size)
}
VecMisc2::Fcvtzu => {
debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
(0b1, 0b11011, enc_size)
}
VecMisc2::Scvtf => {
debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
(0b0, 0b11101, enc_size & 0b1)
}
VecMisc2::Ucvtf => {
debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
(0b1, 0b11101, enc_size & 0b1)
}
};
sink.put4(enc_vec_rr_misc((q << 1) | u, size, bits_12_16, rd, rn));
}
@@ -1644,7 +1660,12 @@ impl MachInstEmit for Inst {
| machreg_to_vec(rd.to_reg()),
);
}
- &Inst::VecExtend { t, rd, rn } => {
+ &Inst::VecExtend {
+ t,
+ rd,
+ rn,
+ high_half,
+ } => {
let (u, immh) = match t {
VecExtendOp::Sxtl8 => (0b0, 0b001),
VecExtendOp::Sxtl16 => (0b0, 0b010),
@@ -1655,22 +1676,38 @@ impl MachInstEmit for Inst {
};
sink.put4(
0b000_011110_0000_000_101001_00000_00000
| ((high_half as u32) << 30)
| (u << 29)
| (immh << 19)
| (machreg_to_vec(rn) << 5)
| machreg_to_vec(rd.to_reg()),
);
}
- &Inst::VecMiscNarrow { op, rd, rn, size } => {
- debug_assert!(!size.is_128bits());
- let size = match size.widen() {
- VectorSize::Size64x2 => 0b10,
- _ => unimplemented!(),
+ &Inst::VecMiscNarrow {
+ op,
+ rd,
+ rn,
+ size,
+ high_half,
+ } => {
+ let size = match size.lane_size() {
+ ScalarSize::Size8 => 0b00,
+ ScalarSize::Size16 => 0b01,
+ ScalarSize::Size32 => 0b10,
+ _ => panic!("Unexpected vector operand lane size!"),
};
let (u, bits_12_16) = match op {
VecMiscNarrowOp::Xtn => (0b0, 0b10010),
VecMiscNarrowOp::Sqxtn => (0b0, 0b10100),
VecMiscNarrowOp::Sqxtun => (0b1, 0b10010),
};
- sink.put4(enc_vec_rr_misc(u, size, bits_12_16, rd, rn));
+ sink.put4(enc_vec_rr_misc(
+ ((high_half as u32) << 1) | u,
+ size,
+ bits_12_16,
+ rd,
+ rn,
+ ));
}
&Inst::VecMovElement {
rd,

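In both arms above, high_half only toggles the Q bit of the encoding: the extends OR in (high_half as u32) << 30 directly, and the narrows pass ((high_half as u32) << 1) | u so that enc_vec_rr_misc places it in the same position as for VecMisc. Semantically, the "...2" forms operate on the upper 64 bits of the 128-bit register: sxtl2/uxtl2 read the high source lanes, while xtn2/sqxtn2/sqxtun2 write the high destination lanes. A small illustration of which lane indices are involved, where half_lane_indices is a hypothetical helper, not part of the diff:

// For example, `sxtl2 v17.4s, v19.8h` reads lanes 4..8 of v19, while
// `sxtl v17.4s, v19.4h` reads lanes 0..4.
fn half_lane_indices(total_lanes: usize, high_half: bool) -> std::ops::Range<usize> {
    let half = total_lanes / 2;
    if high_half {
        half..total_lanes
    } else {
        0..half
    }
}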
@@ -2008,6 +2008,7 @@ fn test_aarch64_binemit() {
t: VecExtendOp::Sxtl8,
rd: writable_vreg(4),
rn: vreg(27),
high_half: false,
},
"64A7080F",
"sxtl v4.8h, v27.8b",
@@ -2017,15 +2018,17 @@ fn test_aarch64_binemit() {
t: VecExtendOp::Sxtl16,
rd: writable_vreg(17),
rn: vreg(19),
high_half: true,
},
"71A6100F",
"sxtl v17.4s, v19.4h",
"71A6104F",
"sxtl2 v17.4s, v19.8h",
));
insns.push((
Inst::VecExtend {
t: VecExtendOp::Sxtl32,
rd: writable_vreg(30),
rn: vreg(6),
high_half: false,
},
"DEA4200F",
"sxtl v30.2d, v6.2s",
@@ -2035,15 +2038,17 @@ fn test_aarch64_binemit() {
t: VecExtendOp::Uxtl8,
rd: writable_vreg(3),
rn: vreg(29),
high_half: true,
},
"A3A7082F",
"uxtl v3.8h, v29.8b",
"A3A7086F",
"uxtl2 v3.8h, v29.16b",
));
insns.push((
Inst::VecExtend {
t: VecExtendOp::Uxtl16,
rd: writable_vreg(15),
rn: vreg(12),
high_half: false,
},
"8FA5102F",
"uxtl v15.4s, v12.4h",
@@ -2053,9 +2058,10 @@ fn test_aarch64_binemit() {
t: VecExtendOp::Uxtl32,
rd: writable_vreg(28),
rn: vreg(2),
high_half: true,
},
"5CA4202F",
"uxtl v28.2d, v2.2s",
"5CA4206F",
"uxtl2 v28.2d, v2.4s",
));
insns.push((
@@ -2088,11 +2094,36 @@ fn test_aarch64_binemit() {
rd: writable_vreg(22),
rn: vreg(8),
size: VectorSize::Size32x2,
high_half: false,
},
"1629A10E",
"xtn v22.2s, v8.2d",
));
insns.push((
Inst::VecMiscNarrow {
op: VecMiscNarrowOp::Sqxtn,
rd: writable_vreg(31),
rn: vreg(0),
size: VectorSize::Size16x8,
high_half: true,
},
"1F48614E",
"sqxtn2 v31.8h, v0.4s",
));
insns.push((
Inst::VecMiscNarrow {
op: VecMiscNarrowOp::Sqxtun,
rd: writable_vreg(16),
rn: vreg(23),
size: VectorSize::Size8x16,
high_half: false,
},
"F02A212E",
"sqxtun v16.8b, v23.8h",
));
insns.push((
Inst::VecRRR {
alu_op: VecALUOp::Sqadd,
@@ -3322,6 +3353,50 @@ fn test_aarch64_binemit() {
"shll v1.2d, v10.2s, #32",
));
insns.push((
Inst::VecMisc {
op: VecMisc2::Fcvtzs,
rd: writable_vreg(4),
rn: vreg(22),
size: VectorSize::Size32x4,
},
"C4BAA14E",
"fcvtzs v4.4s, v22.4s",
));
insns.push((
Inst::VecMisc {
op: VecMisc2::Fcvtzu,
rd: writable_vreg(29),
rn: vreg(15),
size: VectorSize::Size64x2,
},
"FDB9E16E",
"fcvtzu v29.2d, v15.2d",
));
insns.push((
Inst::VecMisc {
op: VecMisc2::Scvtf,
rd: writable_vreg(20),
rn: vreg(8),
size: VectorSize::Size32x4,
},
"14D9214E",
"scvtf v20.4s, v8.4s",
));
insns.push((
Inst::VecMisc {
op: VecMisc2::Ucvtf,
rd: writable_vreg(10),
rn: vreg(19),
size: VectorSize::Size64x2,
},
"6ADA616E",
"ucvtf v10.2d, v19.2d",
));
insns.push((
Inst::VecLanes {
op: VecLanesOp::Uminv,

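Each new test case pairs an Inst with its expected machine code and disassembly; the hex string lists the four instruction bytes in memory (little-endian) order, so "C4BAA14E" above corresponds to the 32-bit word 0x4EA1BAC4. A sketch for recovering the word from such a string, where word_from_test_bytes is a hypothetical helper, not part of the test suite:

fn word_from_test_bytes(hex: &str) -> u32 {
    // Two hex digits per byte, bytes listed in memory order.
    let byte = |i: usize| u8::from_str_radix(&hex[i..i + 2], 16).unwrap();
    u32::from_le_bytes([byte(0), byte(2), byte(4), byte(6)])
}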
@@ -308,6 +308,14 @@ pub enum VecMisc2 {
Rev64,
/// Shift left long (by element size)
Shll,
/// Floating-point convert to signed integer, rounding toward zero
Fcvtzs,
/// Floating-point convert to unsigned integer, rounding toward zero
Fcvtzu,
/// Signed integer convert to floating-point
Scvtf,
/// Unsigned integer convert to floating-point
Ucvtf,
}
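// Behavioural note (architecture reference, not from this diff): the vector
// fcvtzs/fcvtzu forms round toward zero and saturate on overflow, and NaN
// inputs convert to 0. For example, fcvtzs on f32 lanes
// [1.9, -1.9, 3.5e9, NaN] produces [1, -1, i32::MAX, 0].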
/// A Vector narrowing operation with two registers.
@@ -315,6 +323,10 @@ pub enum VecMisc2 {
pub enum VecMiscNarrowOp {
/// Extract Narrow
Xtn,
/// Signed saturating extract narrow
Sqxtn,
/// Signed saturating extract unsigned narrow
Sqxtun,
}
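// Behavioural note (architecture reference, not from this diff): for an
// i32 -> 16-bit narrowing,
//   sqxtn  saturates to the signed range:   40000 -> 32767, -40000 -> -32768
//   sqxtun saturates to the unsigned range: 40000 -> 40000, -40000 -> 0
// while plain xtn simply truncates each lane.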
/// An operation across the lanes of vectors.
@@ -884,6 +896,7 @@ pub enum Inst {
t: VecExtendOp,
rd: Writable<Reg>,
rn: Reg,
high_half: bool,
},
/// Move vector element to another vector element.
@@ -901,6 +914,7 @@ pub enum Inst {
rd: Writable<Reg>,
rn: Reg,
size: VectorSize,
high_half: bool,
},
/// A vector ALU op.
@@ -1628,9 +1642,16 @@ fn aarch64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
collector.add_mod(rd);
collector.add_use(rn);
}
- &Inst::VecMiscNarrow { rd, rn, .. } => {
- collector.add_def(rd);
+ &Inst::VecMiscNarrow {
+ rd, rn, high_half, ..
+ } => {
collector.add_use(rn);
+ if high_half {
+ collector.add_mod(rd);
+ } else {
+ collector.add_def(rd);
+ }
}
&Inst::VecRRR {
alu_op, rd, rn, rm, ..
@@ -2300,10 +2321,16 @@ fn aarch64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
&mut Inst::VecMiscNarrow {
ref mut rd,
ref mut rn,
high_half,
..
} => {
- map_def(mapper, rd);
map_use(mapper, rn);
+ if high_half {
+ map_mod(mapper, rd);
+ } else {
+ map_def(mapper, rd);
+ }
}
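// Note on the def/mod distinction above (not part of the diff): the low-half
// narrowing forms (e.g. `xtn v22.2s, v8.2d`) write the entire destination
// register, so rd is a pure def, whereas the "...2" forms
// (e.g. `xtn2 v22.4s, v8.2d`) merge their result into the upper 64 bits and
// leave the lower half untouched, so the previous value of rd is live into
// the instruction and rd must be treated as modified.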
&mut Inst::VecRRR {
alu_op,
@@ -3155,14 +3182,20 @@ impl Inst {
let rn = show_vreg_element(rn, mb_rru, 0, size);
format!("dup {}, {}", rd, rn)
}
- &Inst::VecExtend { t, rd, rn } => {
- let (op, dest, src) = match t {
- VecExtendOp::Sxtl8 => ("sxtl", VectorSize::Size16x8, VectorSize::Size8x8),
- VecExtendOp::Sxtl16 => ("sxtl", VectorSize::Size32x4, VectorSize::Size16x4),
- VecExtendOp::Sxtl32 => ("sxtl", VectorSize::Size64x2, VectorSize::Size32x2),
- VecExtendOp::Uxtl8 => ("uxtl", VectorSize::Size16x8, VectorSize::Size8x8),
- VecExtendOp::Uxtl16 => ("uxtl", VectorSize::Size32x4, VectorSize::Size16x4),
- VecExtendOp::Uxtl32 => ("uxtl", VectorSize::Size64x2, VectorSize::Size32x2),
+ &Inst::VecExtend { t, rd, rn, high_half } => {
+ let (op, dest, src) = match (t, high_half) {
+ (VecExtendOp::Sxtl8, false) => ("sxtl", VectorSize::Size16x8, VectorSize::Size8x8),
+ (VecExtendOp::Sxtl8, true) => ("sxtl2", VectorSize::Size16x8, VectorSize::Size8x16),
+ (VecExtendOp::Sxtl16, false) => ("sxtl", VectorSize::Size32x4, VectorSize::Size16x4),
+ (VecExtendOp::Sxtl16, true) => ("sxtl2", VectorSize::Size32x4, VectorSize::Size16x8),
+ (VecExtendOp::Sxtl32, false) => ("sxtl", VectorSize::Size64x2, VectorSize::Size32x2),
+ (VecExtendOp::Sxtl32, true) => ("sxtl2", VectorSize::Size64x2, VectorSize::Size32x4),
+ (VecExtendOp::Uxtl8, false) => ("uxtl", VectorSize::Size16x8, VectorSize::Size8x8),
+ (VecExtendOp::Uxtl8, true) => ("uxtl2", VectorSize::Size16x8, VectorSize::Size8x16),
+ (VecExtendOp::Uxtl16, false) => ("uxtl", VectorSize::Size32x4, VectorSize::Size16x4),
+ (VecExtendOp::Uxtl16, true) => ("uxtl2", VectorSize::Size32x4, VectorSize::Size16x8),
+ (VecExtendOp::Uxtl32, false) => ("uxtl", VectorSize::Size64x2, VectorSize::Size32x2),
+ (VecExtendOp::Uxtl32, true) => ("uxtl2", VectorSize::Size64x2, VectorSize::Size32x4),
};
let rd = show_vreg_vector(rd.to_reg(), mb_rru, dest);
let rn = show_vreg_vector(rn, mb_rru, src);
@@ -3179,11 +3212,22 @@ impl Inst {
let rn = show_vreg_element(rn, mb_rru, idx2, size);
format!("mov {}, {}", rd, rn)
}
- &Inst::VecMiscNarrow { op, rd, rn, size } => {
- let rd = show_vreg_vector(rd.to_reg(), mb_rru, size);
+ &Inst::VecMiscNarrow { op, rd, rn, size, high_half } => {
+ let dest_size = if high_half {
+ assert!(size.is_128bits());
+ size
+ } else {
+ size.halve()
+ };
+ let rd = show_vreg_vector(rd.to_reg(), mb_rru, dest_size);
let rn = show_vreg_vector(rn, mb_rru, size.widen());
- let op = match op {
- VecMiscNarrowOp::Xtn => "xtn",
+ let op = match (op, high_half) {
+ (VecMiscNarrowOp::Xtn, false) => "xtn",
+ (VecMiscNarrowOp::Xtn, true) => "xtn2",
+ (VecMiscNarrowOp::Sqxtn, false) => "sqxtn",
+ (VecMiscNarrowOp::Sqxtn, true) => "sqxtn2",
+ (VecMiscNarrowOp::Sqxtun, false) => "sqxtun",
+ (VecMiscNarrowOp::Sqxtun, true) => "sqxtun2",
};
format!("{} {}, {}", op, rd, rn)
}
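Worked through against the binemit test expectations earlier in this commit, the arrangement logic above gives:

// Xtn,    size = Size32x2, high_half = false: dest = size.halve() = 32x2,
//         src = size.widen() = 64x2           -> "xtn v22.2s, v8.2d"
// Sqxtn,  size = Size16x8, high_half = true:  dest = size (full 8h register),
//         src = size.widen() = 32x4           -> "sqxtn2 v31.8h, v0.4s"
// Sqxtun, size = Size8x16, high_half = false: dest = size.halve() = 8x8,
//         src = size.widen() = 16x8           -> "sqxtun v16.8b, v23.8h"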
@@ -3267,6 +3311,10 @@ impl Inst {
VecMisc2::Fsqrt => ("fsqrt", size),
VecMisc2::Rev64 => ("rev64", size),
VecMisc2::Shll => ("shll", size),
VecMisc2::Fcvtzs => ("fcvtzs", size),
VecMisc2::Fcvtzu => ("fcvtzu", size),
VecMisc2::Scvtf => ("scvtf", size),
VecMisc2::Ucvtf => ("ucvtf", size),
};
let rd_size = if is_shll { size.widen() } else { size };