aarch64: Migrate bitrev/clz/cls/ctz to ISLE (#3658)
This commit migrates these existing instructions to ISLE from the manual
lowerings implemented today. This was mostly straightforward but while I
was at it I fixed what appeared to be broken translations for I{8,16}
for `clz`, `cls`, and `ctz`. Previously the lowerings would produce
results as-if the input was 32-bits, but now I believe they all
correctly account for the bit-width.
This commit is contained in:
@@ -92,164 +92,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
||||
|
||||
Opcode::Rotr | Opcode::Rotl => implemented_in_isle(ctx),
|
||||
|
||||
Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
|
||||
let ty = ty.unwrap();
|
||||
let op_ty = match ty {
|
||||
I8 | I16 | I32 => I32,
|
||||
I64 | I128 => I64,
|
||||
_ => {
|
||||
return Err(CodegenError::Unsupported(format!(
|
||||
"{}: Unsupported type: {:?}",
|
||||
op, ty
|
||||
)))
|
||||
}
|
||||
};
|
||||
let bitop = match op {
|
||||
Opcode::Clz | Opcode::Cls | Opcode::Bitrev => BitOp::from((op, op_ty)),
|
||||
Opcode::Ctz => BitOp::from((Opcode::Bitrev, op_ty)),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
if ty == I128 {
|
||||
let out_regs = get_output_reg(ctx, outputs[0]);
|
||||
let in_regs = put_input_in_regs(ctx, inputs[0]);
|
||||
|
||||
let in_lo = in_regs.regs()[0];
|
||||
let in_hi = in_regs.regs()[1];
|
||||
let out_lo = out_regs.regs()[0];
|
||||
let out_hi = out_regs.regs()[1];
|
||||
|
||||
if op == Opcode::Bitrev || op == Opcode::Ctz {
|
||||
ctx.emit(Inst::BitRR {
|
||||
rd: out_hi,
|
||||
rn: in_lo,
|
||||
op: bitop,
|
||||
});
|
||||
ctx.emit(Inst::BitRR {
|
||||
rd: out_lo,
|
||||
rn: in_hi,
|
||||
op: bitop,
|
||||
});
|
||||
}
|
||||
|
||||
if op == Opcode::Ctz {
|
||||
// We have reduced the problem to a clz by reversing the inputs previously
|
||||
emit_clz_i128(ctx, out_regs.map(|r| r.to_reg()), out_regs);
|
||||
} else if op == Opcode::Clz {
|
||||
emit_clz_i128(ctx, in_regs, out_regs);
|
||||
} else if op == Opcode::Cls {
|
||||
// cls out_hi, in_hi
|
||||
// cls out_lo, in_lo
|
||||
// eon sign_eq, in_hi, in_lo
|
||||
// lsr sign_eq, sign_eq, #63
|
||||
// madd out_lo, out_lo, sign_eq, sign_eq
|
||||
// cmp out_hi, #63
|
||||
// csel out_lo, out_lo, xzr, eq
|
||||
// add out_lo, out_lo, out_hi
|
||||
// mov out_hi, 0
|
||||
|
||||
let sign_eq = ctx.alloc_tmp(I64).only_reg().unwrap();
|
||||
let xzr = writable_zero_reg();
|
||||
|
||||
ctx.emit(Inst::BitRR {
|
||||
rd: out_lo,
|
||||
rn: in_lo,
|
||||
op: bitop,
|
||||
});
|
||||
ctx.emit(Inst::BitRR {
|
||||
rd: out_hi,
|
||||
rn: in_hi,
|
||||
op: bitop,
|
||||
});
|
||||
ctx.emit(Inst::AluRRR {
|
||||
alu_op: ALUOp::EorNot64,
|
||||
rd: sign_eq,
|
||||
rn: in_hi,
|
||||
rm: in_lo,
|
||||
});
|
||||
ctx.emit(Inst::AluRRImmShift {
|
||||
alu_op: ALUOp::Lsr64,
|
||||
rd: sign_eq,
|
||||
rn: sign_eq.to_reg(),
|
||||
immshift: ImmShift::maybe_from_u64(63).unwrap(),
|
||||
});
|
||||
ctx.emit(Inst::AluRRRR {
|
||||
alu_op: ALUOp3::MAdd64,
|
||||
rd: out_lo,
|
||||
rn: out_lo.to_reg(),
|
||||
rm: sign_eq.to_reg(),
|
||||
ra: sign_eq.to_reg(),
|
||||
});
|
||||
ctx.emit(Inst::AluRRImm12 {
|
||||
alu_op: ALUOp::SubS64,
|
||||
rd: xzr,
|
||||
rn: out_hi.to_reg(),
|
||||
imm12: Imm12::maybe_from_u64(63).unwrap(),
|
||||
});
|
||||
ctx.emit(Inst::CSel {
|
||||
cond: Cond::Eq,
|
||||
rd: out_lo,
|
||||
rn: out_lo.to_reg(),
|
||||
rm: xzr.to_reg(),
|
||||
});
|
||||
ctx.emit(Inst::AluRRR {
|
||||
alu_op: ALUOp::Add64,
|
||||
rd: out_lo,
|
||||
rn: out_lo.to_reg(),
|
||||
rm: out_hi.to_reg(),
|
||||
});
|
||||
lower_constant_u64(ctx, out_hi, 0);
|
||||
}
|
||||
} else {
|
||||
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||
let needs_zext = match op {
|
||||
Opcode::Bitrev | Opcode::Ctz => false,
|
||||
Opcode::Clz | Opcode::Cls => true,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let narrow_mode = if needs_zext && ty_bits(ty) == 64 {
|
||||
NarrowValueMode::ZeroExtend64
|
||||
} else if needs_zext {
|
||||
NarrowValueMode::ZeroExtend32
|
||||
} else {
|
||||
NarrowValueMode::None
|
||||
};
|
||||
let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
|
||||
|
||||
ctx.emit(Inst::BitRR { rd, rn, op: bitop });
|
||||
|
||||
// Both bitrev and ctz use a bit-reverse (rbit) instruction; ctz to reduce the problem
|
||||
// to a clz, and bitrev as the main operation.
|
||||
if op == Opcode::Bitrev || op == Opcode::Ctz {
|
||||
// Reversing an n-bit value (n < 32) with a 32-bit bitrev instruction will place
|
||||
// the reversed result in the highest n bits, so we need to shift them down into
|
||||
// place.
|
||||
let right_shift = match ty {
|
||||
I8 => Some(24),
|
||||
I16 => Some(16),
|
||||
I32 => None,
|
||||
I64 => None,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
if let Some(s) = right_shift {
|
||||
ctx.emit(Inst::AluRRImmShift {
|
||||
alu_op: ALUOp::Lsr32,
|
||||
rd,
|
||||
rn: rd.to_reg(),
|
||||
immshift: ImmShift::maybe_from_u64(s).unwrap(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if op == Opcode::Ctz {
|
||||
ctx.emit(Inst::BitRR {
|
||||
op: BitOp::from((Opcode::Clz, op_ty)),
|
||||
rd,
|
||||
rn: rd.to_reg(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => implemented_in_isle(ctx),
|
||||
|
||||
Opcode::Popcnt => {
|
||||
let ty = ty.unwrap();
|
||||
|
||||
Reference in New Issue
Block a user