Enable the simd_i8x16_arith2 test for AArch64

Copyright (c) 2021, Arm Limited.
Anton Kirilov
2021-06-14 16:51:38 +01:00
parent d8d4bf81b2
commit b09b123a9e
2 changed files with 92 additions and 71 deletions


@@ -230,8 +230,7 @@ fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
("simd", _) if platform_is_s390x() => return true, ("simd", _) if platform_is_s390x() => return true,
// These are new instructions that are not really implemented in any backend. // These are new instructions that are not really implemented in any backend.
("simd", "simd_i8x16_arith2") ("simd", "simd_conversions")
| ("simd", "simd_conversions")
| ("simd", "simd_i16x8_extadd_pairwise_i8x16") | ("simd", "simd_i16x8_extadd_pairwise_i8x16")
| ("simd", "simd_i16x8_extmul_i8x16") | ("simd", "simd_i16x8_extmul_i8x16")
| ("simd", "simd_i16x8_q15mulr_sat_s") | ("simd", "simd_i16x8_q15mulr_sat_s")


@@ -1181,86 +1181,108 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
         Opcode::Popcnt => {
-            let out_regs = get_output_reg(ctx, outputs[0]);
-            let in_regs = put_input_in_regs(ctx, inputs[0]);
             let ty = ty.unwrap();
-            let size = if ty == I128 {
-                ScalarSize::Size64
-            } else {
-                ScalarSize::from_operand_size(OperandSize::from_ty(ty))
-            };
-            let vec_size = if ty == I128 {
-                VectorSize::Size8x16
-            } else {
-                VectorSize::Size8x8
-            };
-            let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
-            // fmov tmp, in_lo
-            // if ty == i128:
-            //     mov tmp.d[1], in_hi
-            //
-            // cnt tmp.16b, tmp.16b / cnt tmp.8b, tmp.8b
-            // addv tmp, tmp.16b / addv tmp, tmp.8b / addp tmp.8b, tmp.8b, tmp.8b / (no instruction for 8-bit inputs)
-            //
-            // umov out_lo, tmp.b[0]
-            // if ty == i128:
-            //     mov out_hi, 0
-            ctx.emit(Inst::MovToFpu {
-                rd: tmp,
-                rn: in_regs.regs()[0],
-                size,
-            });
-            if ty == I128 {
-                ctx.emit(Inst::MovToVec {
-                    rd: tmp,
-                    rn: in_regs.regs()[1],
-                    idx: 1,
-                    size: VectorSize::Size64x2,
-                });
-            }
-            ctx.emit(Inst::VecMisc {
-                op: VecMisc2::Cnt,
-                rd: tmp,
-                rn: tmp.to_reg(),
-                size: vec_size,
-            });
-            match ScalarSize::from_ty(ty) {
-                ScalarSize::Size8 => {}
-                ScalarSize::Size16 => {
-                    // ADDP is usually cheaper than ADDV.
-                    ctx.emit(Inst::VecRRR {
-                        alu_op: VecALUOp::Addp,
-                        rd: tmp,
-                        rn: tmp.to_reg(),
-                        rm: tmp.to_reg(),
-                        size: VectorSize::Size8x8,
-                    });
-                }
-                ScalarSize::Size32 | ScalarSize::Size64 | ScalarSize::Size128 => {
-                    ctx.emit(Inst::VecLanes {
-                        op: VecLanesOp::Addv,
-                        rd: tmp,
-                        rn: tmp.to_reg(),
-                        size: vec_size,
-                    });
-                }
-            }
-            ctx.emit(Inst::MovFromVec {
-                rd: out_regs.regs()[0],
-                rn: tmp.to_reg(),
-                idx: 0,
-                size: VectorSize::Size8x16,
-            });
-            if ty == I128 {
-                lower_constant_u64(ctx, out_regs.regs()[1], 0);
-            }
+            if ty.is_vector() {
+                let lane_type = ty.lane_type();
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+                let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+
+                if lane_type != I8 {
+                    return Err(CodegenError::Unsupported(format!(
+                        "Unsupported SIMD vector lane type: {:?}",
+                        lane_type
+                    )));
+                }
+
+                ctx.emit(Inst::VecMisc {
+                    op: VecMisc2::Cnt,
+                    rd,
+                    rn,
+                    size: VectorSize::from_ty(ty),
+                });
+            } else {
+                let out_regs = get_output_reg(ctx, outputs[0]);
+                let in_regs = put_input_in_regs(ctx, inputs[0]);
+                let size = if ty == I128 {
+                    ScalarSize::Size64
+                } else {
+                    ScalarSize::from_operand_size(OperandSize::from_ty(ty))
+                };
+                let vec_size = if ty == I128 {
+                    VectorSize::Size8x16
+                } else {
+                    VectorSize::Size8x8
+                };
+                let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
+
+                // fmov tmp, in_lo
+                // if ty == i128:
+                //     mov tmp.d[1], in_hi
+                //
+                // cnt tmp.16b, tmp.16b / cnt tmp.8b, tmp.8b
+                // addv tmp, tmp.16b / addv tmp, tmp.8b / addp tmp.8b, tmp.8b, tmp.8b / (no instruction for 8-bit inputs)
+                //
+                // umov out_lo, tmp.b[0]
+                // if ty == i128:
+                //     mov out_hi, 0
+                ctx.emit(Inst::MovToFpu {
+                    rd: tmp,
+                    rn: in_regs.regs()[0],
+                    size,
+                });
+                if ty == I128 {
+                    ctx.emit(Inst::MovToVec {
+                        rd: tmp,
+                        rn: in_regs.regs()[1],
+                        idx: 1,
+                        size: VectorSize::Size64x2,
+                    });
+                }
+
+                ctx.emit(Inst::VecMisc {
+                    op: VecMisc2::Cnt,
+                    rd: tmp,
+                    rn: tmp.to_reg(),
+                    size: vec_size,
+                });
+
+                match ScalarSize::from_ty(ty) {
+                    ScalarSize::Size8 => {}
+                    ScalarSize::Size16 => {
+                        // ADDP is usually cheaper than ADDV.
+                        ctx.emit(Inst::VecRRR {
+                            alu_op: VecALUOp::Addp,
+                            rd: tmp,
+                            rn: tmp.to_reg(),
+                            rm: tmp.to_reg(),
+                            size: VectorSize::Size8x8,
+                        });
+                    }
+                    ScalarSize::Size32 | ScalarSize::Size64 | ScalarSize::Size128 => {
+                        ctx.emit(Inst::VecLanes {
+                            op: VecLanesOp::Addv,
+                            rd: tmp,
+                            rn: tmp.to_reg(),
+                            size: vec_size,
+                        });
+                    }
+                }
+
+                ctx.emit(Inst::MovFromVec {
+                    rd: out_regs.regs()[0],
+                    rn: tmp.to_reg(),
+                    idx: 0,
+                    size: VectorSize::Size8x16,
+                });
+                if ty == I128 {
+                    lower_constant_u64(ctx, out_regs.regs()[1], 0);
+                }
+            }
         }
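For intuition, the vector branch added here needs only a single CNT because the result of i8x16.popcnt is itself a vector of per-byte counts, whereas the pre-existing scalar path (now the else branch) moves the value into a vector register, lets CNT count bits per byte, and then sums the byte lanes with ADDV/ADDP. A minimal Rust model of that byte-counting idea — purely illustrative, not code from the repository:

/// Illustrative model (not repository code) of the AArch64 scalar popcnt
/// lowering: CNT produces one popcount per byte, ADDV then sums the lanes.
fn popcnt_via_bytes(x: u64) -> u32 {
    x.to_le_bytes()
        .iter()
        .map(|b| b.count_ones()) // per-byte popcount, like `cnt tmp.8b, tmp.8b`
        .sum() // horizontal add across the byte lanes, like `addv`
}

fn main() {
    // 0xff contributes 8 set bits, 0x0f contributes 4, 0x03 contributes 2.
    assert_eq!(popcnt_via_bytes(0xff00_00ff_0f0f_0003), 8 + 8 + 4 + 4 + 2);
    assert_eq!(popcnt_via_bytes(u64::MAX), 64);
}

In the i8x16 case the horizontal-add step disappears entirely, which is why the new lowering collapses to one VecMisc with VecMisc2::Cnt.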