Merge pull request #3002 from afonso360/aarch64-i128-br

aarch64: implement brz, brnz, br_icmp for i128 values
Chris Fallin, 2021-06-21 10:52:50 -07:00 (committed via GitHub)
5 changed files with 902 additions and 206 deletions
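For orientation before the diffs: the aarch64 backend carries an i128 value in a pair of 64-bit registers, so a brz/brnz on i128 reduces to a zero test across both halves; the lower_branch hunk near the end of this page ORs the halves into one register and reuses the ordinary zero/non-zero branch. A minimal scalar model of that test, in plain Rust with illustrative names (a sketch, not code from this commit):

    // Hypothetical sketch: split the value as the register pair does,
    // then OR the halves; `brz` is taken exactly when the result is 0.
    fn brz_taken(v: u128) -> bool {
        let lo = v as u64;
        let hi = (v >> 64) as u64;
        (lo | hi) == 0
    }

    fn main() {
        assert!(brz_taken(0));
        assert!(!brz_taken(1)); // nonzero low half
        assert!(!brz_taken(1u128 << 64)); // nonzero only in the high half
    }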


@@ -1226,12 +1226,43 @@ pub(crate) fn maybe_input_insn_via_conv<C: LowerCtx<I = Inst>>(
None
}
-pub(crate) fn lower_icmp_or_ifcmp_to_flags<C: LowerCtx<I = Inst>>(
+/// Specifies what [lower_icmp] should do when lowering
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) enum IcmpOutput {
+    /// Only sets flags, discarding the results
+    Flags,
+    /// Materializes the results into a register. The flags set may be incorrect
+    Register(Writable<Reg>),
+}
+impl IcmpOutput {
+    pub fn reg(&self) -> Option<Writable<Reg>> {
+        match self {
+            IcmpOutput::Flags => None,
+            IcmpOutput::Register(reg) => Some(*reg),
+        }
+    }
+}
+/// Lower an icmp comparison
+///
+/// We can lower into the status flags, or materialize the result into a register.
+/// This is controlled by the `output` parameter.
+pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
-    is_signed: bool,
-) {
-    debug!("lower_icmp_or_ifcmp_to_flags: insn {}", insn);
+    condcode: IntCC,
+    output: IcmpOutput,
+) -> CodegenResult<()> {
+    debug!(
+        "lower_icmp: insn {}, condcode: {}, output: {:?}",
+        insn, condcode, output
+    );
+    let rd = output.reg().unwrap_or(writable_zero_reg());
+    let inputs = insn_inputs(ctx, insn);
+    let cond = lower_condcode(condcode);
+    let is_signed = condcode_is_signed(condcode);
let ty = ctx.input_ty(insn, 0);
let bits = ty_bits(ty);
let narrow_mode = match (bits <= 32, is_signed) {
@@ -1240,14 +1271,149 @@ pub(crate) fn lower_icmp_or_ifcmp_to_flags<C: LowerCtx<I = Inst>>(
(false, true) => NarrowValueMode::SignExtend64,
(false, false) => NarrowValueMode::ZeroExtend64,
};
-    let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
-    let ty = ctx.input_ty(insn, 0);
-    let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
-    let rm = put_input_in_rse_imm12(ctx, inputs[1], narrow_mode);
-    debug!("lower_icmp_or_ifcmp_to_flags: rn = {:?} rm = {:?}", rn, rm);
-    let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
-    let rd = writable_zero_reg();
-    ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
+    if ty == I128 {
+        let lhs = put_input_in_regs(ctx, inputs[0]);
+        let rhs = put_input_in_regs(ctx, inputs[1]);
+        let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
+        let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
+        match condcode {
+            IntCC::Equal | IntCC::NotEqual => {
+                // eor     tmp1, lhs_lo, rhs_lo
+                // eor     tmp2, lhs_hi, rhs_hi
+                // adds    xzr, tmp1, tmp2
+                // cset    dst, {eq, ne}
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::Eor64,
+                    rd: tmp1,
+                    rn: lhs.regs()[0],
+                    rm: rhs.regs()[0],
+                });
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::Eor64,
+                    rd: tmp2,
+                    rn: lhs.regs()[1],
+                    rm: rhs.regs()[1],
+                });
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::AddS64,
+                    rd: writable_zero_reg(),
+                    rn: tmp1.to_reg(),
+                    rm: tmp2.to_reg(),
+                });
+                if let IcmpOutput::Register(rd) = output {
+                    materialize_bool_result(ctx, insn, rd, cond);
+                }
+            }
+            IntCC::Overflow | IntCC::NotOverflow => {
+                // We can do a 128-bit add while throwing away the results
+                // and check the overflow flags at the end.
+                //
+                // adds    xzr, lhs_lo, rhs_lo
+                // adcs    xzr, lhs_hi, rhs_hi
+                // cset    dst, {vs, vc}
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::AddS64,
+                    rd: writable_zero_reg(),
+                    rn: lhs.regs()[0],
+                    rm: rhs.regs()[0],
+                });
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::AdcS64,
+                    rd: writable_zero_reg(),
+                    rn: lhs.regs()[1],
+                    rm: rhs.regs()[1],
+                });
+                if let IcmpOutput::Register(rd) = output {
+                    materialize_bool_result(ctx, insn, rd, cond);
+                }
+            }
+            _ => {
+                // cmp     lhs_lo, rhs_lo
+                // cset    tmp1, unsigned_cond
+                // cmp     lhs_hi, rhs_hi
+                // cset    tmp2, cond
+                // csel    dst, tmp1, tmp2, eq
+                let rd = output.reg().unwrap_or(tmp1);
+                let unsigned_cond = lower_condcode(condcode.unsigned());
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::SubS64,
+                    rd: writable_zero_reg(),
+                    rn: lhs.regs()[0],
+                    rm: rhs.regs()[0],
+                });
+                materialize_bool_result(ctx, insn, tmp1, unsigned_cond);
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::SubS64,
+                    rd: writable_zero_reg(),
+                    rn: lhs.regs()[1],
+                    rm: rhs.regs()[1],
+                });
+                materialize_bool_result(ctx, insn, tmp2, cond);
+                ctx.emit(Inst::CSel {
+                    cond: Cond::Eq,
+                    rd,
+                    rn: tmp1.to_reg(),
+                    rm: tmp2.to_reg(),
+                });
+                if output == IcmpOutput::Flags {
+                    // We only need to guarantee that the flags for `cond` are
+                    // correct, so we can compare rd with 0 or 1. For a "compare
+                    // or equal" condition we compare with 1 instead of zero.
+                    if condcode.without_equal() != condcode {
+                        lower_constant_u64(ctx, tmp2, 1);
+                    }
+                    let xzr = zero_reg();
+                    let rd = rd.to_reg();
+                    let tmp2 = tmp2.to_reg();
+                    let (rn, rm) = match condcode {
+                        IntCC::SignedGreaterThanOrEqual => (rd, tmp2),
+                        IntCC::UnsignedGreaterThanOrEqual => (rd, tmp2),
+                        IntCC::SignedLessThanOrEqual => (tmp2, rd),
+                        IntCC::UnsignedLessThanOrEqual => (tmp2, rd),
+                        IntCC::SignedGreaterThan => (rd, xzr),
+                        IntCC::UnsignedGreaterThan => (rd, xzr),
+                        IntCC::SignedLessThan => (xzr, rd),
+                        IntCC::UnsignedLessThan => (xzr, rd),
+                        _ => unreachable!(),
+                    };
+                    ctx.emit(Inst::AluRRR {
+                        alu_op: ALUOp::SubS64,
+                        rd: writable_zero_reg(),
+                        rn,
+                        rm,
+                    });
+                }
+            }
+        }
+    } else if !ty.is_vector() {
+        let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
+        let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+        let rm = put_input_in_rse_imm12(ctx, inputs[1], narrow_mode);
+        ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, rm));
+        if let IcmpOutput::Register(rd) = output {
+            materialize_bool_result(ctx, insn, rd, cond);
+        }
+    } else {
+        let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+        let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
+        lower_vector_compare(ctx, rd, rn, rm, ty, cond)?;
+    }
+    Ok(())
}
pub(crate) fn lower_fcmp_or_ffcmp_to_flags<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst) {
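Two of the i128 arms above rest on small scalar identities, sketched here in plain Rust (hypothetical helpers, not part of the commit). Equality holds exactly when both XORed half-pairs are zero; the lowering folds the two XOR results with a flag-setting adds so a single cset can read the Z flag. The overflow conditions come from performing the full 128-bit addition while discarding the value, so the V flag left by the final adcs reflects the signed overflow of the whole add, which checked_add models:

    // Equal/NotEqual arm, reference semantics (a sketch).
    fn i128_eq(lhs: u128, rhs: u128) -> bool {
        let lo = (lhs as u64) ^ (rhs as u64);
        let hi = ((lhs >> 64) as u64) ^ ((rhs >> 64) as u64);
        lo == 0 && hi == 0
    }

    // Overflow/NotOverflow arm, reference semantics: adds/adcs compute
    // lhs + rhs in full, and V after the high-half adcs flags signed
    // 128-bit overflow.
    fn i128_add_overflows(lhs: i128, rhs: i128) -> bool {
        lhs.checked_add(rhs).is_none()
    }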
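The fallthrough arm compares lexicographically: the high halves decide the ordering unless they tie, in which case the low halves are compared unsigned (a low half carries no sign bit); the csel on the eq condition is what selects between the two cset results. A sketch for signed less-than (hypothetical helper, not backend code):

    fn i128_slt(lhs: i128, rhs: i128) -> bool {
        let (l_lo, l_hi) = (lhs as u64, (lhs >> 64) as i64);
        let (r_lo, r_hi) = (rhs as u64, (rhs >> 64) as i64);
        if l_hi == r_hi {
            l_lo < r_lo // tie on the high half: unsigned low-half compare
        } else {
            l_hi < r_hi // otherwise the signed high halves decide
        }
    }

The flags-only epilogue then re-derives flags from the materialized boolean: with rd holding 0 or 1, an or-equal condition such as sge is probed against a register holding 1 (true exactly when rd == 1), a strict condition is probed against xzr (e.g. rd > 0), and the less-than forms swap the subs operands rather than inverting the condition.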


@@ -1,7 +1,7 @@
//! Lower a single Cranelift instruction into vcode.
use crate::binemit::CodeOffset;
-use crate::ir::condcodes::{FloatCC, IntCC};
+use crate::ir::condcodes::FloatCC;
use crate::ir::types::*;
use crate::ir::Inst as IRInst;
use crate::ir::{InstructionData, Opcode, TrapCode};
@@ -1631,8 +1631,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
{
let condcode = ctx.data(icmp_insn).cond_code().unwrap();
let cond = lower_condcode(condcode);
-            let is_signed = condcode_is_signed(condcode);
-            lower_icmp_or_ifcmp_to_flags(ctx, icmp_insn, is_signed);
+            lower_icmp(ctx, icmp_insn, condcode, IcmpOutput::Flags)?;
cond
} else if let Some(fcmp_insn) =
maybe_input_insn_via_conv(ctx, flag_input, Opcode::Fcmp, Opcode::Bint)
@@ -1680,11 +1679,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Selectif | Opcode::SelectifSpectreGuard => {
let condcode = ctx.data(insn).cond_code().unwrap();
let cond = lower_condcode(condcode);
-            let is_signed = condcode_is_signed(condcode);
// Verification ensures that the input is always a
// single-def ifcmp.
let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
-            lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
+            lower_icmp(ctx, ifcmp_insn, condcode, IcmpOutput::Flags)?;
// csel.COND rd, rn, rm
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
@@ -1751,14 +1749,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Trueif => {
let condcode = ctx.data(insn).cond_code().unwrap();
let cond = lower_condcode(condcode);
-            let is_signed = condcode_is_signed(condcode);
// Verification ensures that the input is always a
// single-def ifcmp.
let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
-            lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            materialize_bool_result(ctx, insn, rd, cond);
+            lower_icmp(ctx, ifcmp_insn, condcode, IcmpOutput::Register(rd))?;
}
Opcode::Trueff => {
@@ -1950,126 +1945,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Icmp => {
let condcode = ctx.data(insn).cond_code().unwrap();
-            let cond = lower_condcode(condcode);
-            let is_signed = condcode_is_signed(condcode);
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            let ty = ctx.input_ty(insn, 0);
-            let bits = ty_bits(ty);
-            let narrow_mode = match (bits <= 32, is_signed) {
-                (true, true) => NarrowValueMode::SignExtend32,
-                (true, false) => NarrowValueMode::ZeroExtend32,
-                (false, true) => NarrowValueMode::SignExtend64,
-                (false, false) => NarrowValueMode::ZeroExtend64,
-            };
-            if ty == I128 {
-                let lhs = put_input_in_regs(ctx, inputs[0]);
-                let rhs = put_input_in_regs(ctx, inputs[1]);
-                let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
-                let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
-                match condcode {
-                    IntCC::Equal | IntCC::NotEqual => {
-                        // eor     tmp1, lhs_lo, rhs_lo
-                        // eor     tmp2, lhs_hi, rhs_hi
-                        // adds    xzr, tmp1, tmp2
-                        // cset    dst, {eq, ne}
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::Eor64,
-                            rd: tmp1,
-                            rn: lhs.regs()[0],
-                            rm: rhs.regs()[0],
-                        });
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::Eor64,
-                            rd: tmp2,
-                            rn: lhs.regs()[1],
-                            rm: rhs.regs()[1],
-                        });
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::AddS64,
-                            rd: writable_zero_reg(),
-                            rn: tmp1.to_reg(),
-                            rm: tmp2.to_reg(),
-                        });
-                        materialize_bool_result(ctx, insn, rd, cond);
-                    }
-                    IntCC::Overflow | IntCC::NotOverflow => {
-                        // We can do an 128bit add while throwing away the results
-                        // and check the overflow flags at the end.
-                        //
-                        // adds    xzr, lhs_lo, rhs_lo
-                        // adcs    xzr, lhs_hi, rhs_hi
-                        // cset    dst, {vs, vc}
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::AddS64,
-                            rd: writable_zero_reg(),
-                            rn: lhs.regs()[0],
-                            rm: rhs.regs()[0],
-                        });
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::AdcS64,
-                            rd: writable_zero_reg(),
-                            rn: lhs.regs()[1],
-                            rm: rhs.regs()[1],
-                        });
-                        materialize_bool_result(ctx, insn, rd, cond);
-                    }
-                    _ => {
-                        // cmp     lhs_lo, rhs_lo
-                        // cset    tmp1, low_cc
-                        // cmp     lhs_hi, rhs_hi
-                        // cset    tmp2, cond
-                        // csel    dst, tmp1, tmp2, eq
-                        let low_cc = match condcode {
-                            IntCC::SignedGreaterThanOrEqual | IntCC::UnsignedGreaterThanOrEqual => {
-                                Cond::Hs
-                            }
-                            IntCC::SignedGreaterThan | IntCC::UnsignedGreaterThan => Cond::Hi,
-                            IntCC::SignedLessThanOrEqual | IntCC::UnsignedLessThanOrEqual => {
-                                Cond::Ls
-                            }
-                            IntCC::SignedLessThan | IntCC::UnsignedLessThan => Cond::Lo,
-                            _ => unreachable!(),
-                        };
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::SubS64,
-                            rd: writable_zero_reg(),
-                            rn: lhs.regs()[0],
-                            rm: rhs.regs()[0],
-                        });
-                        materialize_bool_result(ctx, insn, tmp1, low_cc);
-                        ctx.emit(Inst::AluRRR {
-                            alu_op: ALUOp::SubS64,
-                            rd: writable_zero_reg(),
-                            rn: lhs.regs()[1],
-                            rm: rhs.regs()[1],
-                        });
-                        materialize_bool_result(ctx, insn, tmp2, cond);
-                        ctx.emit(Inst::CSel {
-                            cond: Cond::Eq,
-                            rd,
-                            rn: tmp1.to_reg(),
-                            rm: tmp2.to_reg(),
-                        });
-                    }
-                }
-            } else if !ty.is_vector() {
-                let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
-                let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
-                let rm = put_input_in_rse_imm12(ctx, inputs[1], narrow_mode);
-                ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, rm));
-                materialize_bool_result(ctx, insn, rd, cond);
-            } else {
-                let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
-                let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
-                lower_vector_compare(ctx, rd, rn, rm, ty, cond)?;
-            }
+            lower_icmp(ctx, insn, condcode, IcmpOutput::Register(rd))?;
}
Opcode::Fcmp => {
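The hunk above is the payoff of the refactor: roughly 120 hand-rolled lines of the Icmp arm, duplicating the i128 special cases from the first file, collapse into a single lower_icmp call, with IcmpOutput::Register(rd) requesting a materialized boolean result.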
@@ -2123,11 +2000,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else if op == Opcode::Trapif {
let condcode = ctx.data(insn).cond_code().unwrap();
let cond = lower_condcode(condcode);
-                let is_signed = condcode_is_signed(condcode);
// Verification ensures that the input is always a single-def ifcmp.
let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
-                lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
+                lower_icmp(ctx, ifcmp_insn, condcode, IcmpOutput::Flags)?;
cond
} else {
let condcode = ctx.data(insn).fp_cond_code().unwrap();
@@ -3618,6 +3494,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
match op0 {
Opcode::Brz | Opcode::Brnz => {
+            let ty = ctx.input_ty(branches[0], 0);
let flag_input = InsnInput {
insn: branches[0],
input: 0,
@@ -3627,11 +3504,10 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
{
let condcode = ctx.data(icmp_insn).cond_code().unwrap();
let cond = lower_condcode(condcode);
-                let is_signed = condcode_is_signed(condcode);
let negated = op0 == Opcode::Brz;
let cond = if negated { cond.invert() } else { cond };
-                lower_icmp_or_ifcmp_to_flags(ctx, icmp_insn, is_signed);
+                lower_icmp(ctx, icmp_insn, condcode, IcmpOutput::Flags)?;
ctx.emit(Inst::CondBr {
taken,
not_taken,
@@ -3652,14 +3528,19 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
kind: CondBrKind::Cond(cond),
});
} else {
-                let rt = put_input_in_reg(
-                    ctx,
-                    InsnInput {
-                        insn: branches[0],
-                        input: 0,
-                    },
-                    NarrowValueMode::ZeroExtend64,
-                );
+                let rt = if ty == I128 {
+                    let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
+                    let input = put_input_in_regs(ctx, flag_input);
+                    ctx.emit(Inst::AluRRR {
+                        alu_op: ALUOp::Orr64,
+                        rd: tmp,
+                        rn: input.regs()[0],
+                        rm: input.regs()[1],
+                    });
+                    tmp.to_reg()
+                } else {
+                    put_input_in_reg(ctx, flag_input, NarrowValueMode::ZeroExtend64)
+                };
let kind = match op0 {
Opcode::Brz => CondBrKind::Zero(rt),
Opcode::Brnz => CondBrKind::NotZero(rt),
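Design note on the i128 branch above: ORing the halves collapses the 128-bit zero test into a single register (matching the scalar sketch at the top of this page), so the existing CondBrKind::Zero/NotZero machinery applies unchanged and no new branch kinds are needed.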
@@ -3677,35 +3558,7 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
let cond = lower_condcode(condcode);
let kind = CondBrKind::Cond(cond);
-                let is_signed = condcode_is_signed(condcode);
-                let ty = ctx.input_ty(branches[0], 0);
-                let bits = ty_bits(ty);
-                let narrow_mode = match (bits <= 32, is_signed) {
-                    (true, true) => NarrowValueMode::SignExtend32,
-                    (true, false) => NarrowValueMode::ZeroExtend32,
-                    (false, true) => NarrowValueMode::SignExtend64,
-                    (false, false) => NarrowValueMode::ZeroExtend64,
-                };
-                let rn = put_input_in_reg(
-                    ctx,
-                    InsnInput {
-                        insn: branches[0],
-                        input: 0,
-                    },
-                    narrow_mode,
-                );
-                let rm = put_input_in_rse_imm12(
-                    ctx,
-                    InsnInput {
-                        insn: branches[0],
-                        input: 1,
-                    },
-                    narrow_mode,
-                );
-                let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64);
-                let rd = writable_zero_reg();
-                ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
+                lower_icmp(ctx, branches[0], condcode, IcmpOutput::Flags)?;
ctx.emit(Inst::CondBr {
taken,
not_taken,
@@ -3718,13 +3571,12 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
let cond = lower_condcode(condcode);
let kind = CondBrKind::Cond(cond);
-                let is_signed = condcode_is_signed(condcode);
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
if let Some(ifcmp_insn) = maybe_input_insn(ctx, flag_input, Opcode::Ifcmp) {
-                lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
+                lower_icmp(ctx, ifcmp_insn, condcode, IcmpOutput::Flags)?;
ctx.emit(Inst::CondBr {
taken,
not_taken,