aarch64: Fix lowering amounts for shifts

This commit addresses two issues:
* A panic when shifting any non i128 type by i128 amounts (#3064)
* Wrong results when lowering shifts with small types (i8, i16)

For these types, when shifting by amounts larger than the size of the
type, we would not get the wrapping behaviour that we see on i32 and i64.
This is because for the larger types (i32, i64), the wrapping behaviour is
automatically provided by the corresponding native shift instruction;
however, there are no i8- or i16-specific shift instructions, so we have
to manually wrap the shift amount with an AND instruction.

This issue is also found on x86_64 and s390x, and a separate issue will
be filed for those.

Closes #3064
This commit is contained in:
Afonso Bordado
2021-07-09 14:40:15 +01:00
committed by Anton Kirilov
parent 6c3d7092b9
commit db5566dadb
8 changed files with 1199 additions and 165 deletions

View File

@@ -801,10 +801,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let out_regs = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty == I128 {
// TODO: We can use immlogic here
let src = put_input_in_regs(ctx, inputs[0]);
// We can ignore the top half of the shift amount register
let amt = put_input_in_regs(ctx, inputs[1]).regs()[0];
let amt = lower_shift_amt(ctx, inputs[1], ty, out_regs.regs()[0]).unwrap_reg();
match op {
Opcode::Ishl => emit_shl_i128(ctx, src, out_regs, amt),
@@ -828,7 +826,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ => unreachable!(),
};
let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
let rm = put_input_in_reg_immshift(ctx, inputs[1], ty_bits(ty));
let rm = lower_shift_amt(ctx, inputs[1], ty, out_regs.regs()[0]);
let alu_op = match op {
Opcode::Ishl => choose_32_64(ty, ALUOp::Lsl32, ALUOp::Lsl64),
Opcode::Ushr => choose_32_64(ty, ALUOp::Lsr32, ALUOp::Lsr64),