aarch64: Migrate iadd and isub to ISLE

This commit is the first "meaty" instruction added to ISLE for the
AArch64 backend. I chose to pick the first two in the current lowering's
`match` statement, `isub` and `iadd`. These two turned out to be
particularly interesting for a few reasons:

* Both had clearly migratable-to-ISLE behavior along the lines of
  special-casing per type. For example 128-bit and vector arithmetic
  were both easily translatable.

* The `iadd` instruction has special cases for fusing with a
  multiplication to generate `madd` which is expressed pretty easily in
  ISLE.

* Otherwise both instructions had a number of forms where they attempted
  to interpret the RHS as various forms of constants, extends, or
  shifts. There's a bit of a design space of how best to represent this
  in ISLE and what I settled on was to have a special case for each form
  of instruction, and the special cases are somewhat duplicated between
  `iadd` and `isub`. There's custom "extractors" for the special cases
  and instructions that support these special cases will have an
  `rule`-per-case.

Overall I think the transition to ISLE went pretty well. I don't think that
the aarch64 backend is going to follow the x64 backend super closely,
though. For example the x64 backend currently has a helper per
instruction, but with AArch64 it seems to make more sense to only have
a helper-per-enum-variant-of-`MInst`. This is because the same
instruction (e.g. `ALUOp::Sub32`) can be expressed with multiple
different forms depending on the payload.

It's worth noting that the ISLE looks like it's a good deal larger than
the code actually being removed from lowering as part of this commit. I
think this is deceptive though because a lot of the logic in
`put_input_in_rse_imm12_maybe_negated` and `alu_inst_imm12` is being
inlined into the ISLE definitions for each instruction instead of having
it all packed into the helper functions. Some of the "boilerplate" here
is the addition of various ISLE utilities as well.
This commit is contained in:
Alex Crichton
2021-11-17 13:19:47 -08:00
parent 352ee2b186
commit 7d0f6ab90f
15 changed files with 1076 additions and 301 deletions

View File

@@ -947,6 +947,13 @@
(Size64x2) (Size64x2)
)) ))
;; Helper for calculating the `VectorSize` corresponding to a type
;;
;; Only the four 128-bit lane configurations are covered here; a vector
;; type with any other lane shape has no matching rule.
(decl vector_size (Type) VectorSize)
(rule (vector_size (multi_lane 8 16)) (VectorSize.Size8x16))
(rule (vector_size (multi_lane 16 8)) (VectorSize.Size16x8))
(rule (vector_size (multi_lane 32 4)) (VectorSize.Size32x4))
(rule (vector_size (multi_lane 64 2)) (VectorSize.Size64x2))
;; A floating-point unit (FPU) operation with one arg. ;; A floating-point unit (FPU) operation with one arg.
(type FPUOp1 (type FPUOp1
(enum (enum
@@ -1294,9 +1301,41 @@
(decl imm_logic_from_u64 (ImmLogic) u64) (decl imm_logic_from_u64 (ImmLogic) u64)
(extern extractor imm_logic_from_u64 imm_logic_from_u64) (extern extractor imm_logic_from_u64 imm_logic_from_u64)
;; Extractor that succeeds when the `u64` fits aarch64's 12-bit
;; (optionally shifted) immediate encoding, yielding the `Imm12`.
(decl imm12_from_u64 (Imm12) u64)
(extern extractor imm12_from_u64 imm12_from_u64)
;; Same as `imm12_from_u64`, but matches when the *negated* value fits the
;; 12-bit encoding (used to flip `add`s with negative immediates to `sub`s,
;; and vice versa).
(decl imm12_from_negated_u64 (Imm12) u64)
(extern extractor imm12_from_negated_u64 imm12_from_negated_u64)
;; Extractor that interprets an `Imm64` as a left-shift (LSL) amount for a
;; value of the given type (the `Type` is an `in` argument, the
;; `ShiftOpAndAmt` is the `out` result).
(decl lshl_from_imm64 (Type ShiftOpAndAmt) Imm64)
(extern extractor lshl_from_imm64 lshl_from_imm64 (in out))
(decl integral_ty (Type) Type) (decl integral_ty (Type) Type)
(extern extractor integral_ty integral_ty) (extern extractor integral_ty integral_ty)
;; Helper to go directly from a `Value`, when it's an `iconst`, to an `Imm12`.
(decl imm12_from_value (Imm12) Value)
(extractor
(imm12_from_value n)
(def_inst (iconst (u64_from_imm64 (imm12_from_u64 n)))))
;; Same as `imm12_from_value`, but tries negating the constant value.
;; Useful for turning an `add` of a negative constant into a `sub` of the
;; positive constant (and vice versa).
(decl imm12_from_negated_value (Imm12) Value)
(extractor
(imm12_from_negated_value n)
(def_inst (iconst (u64_from_imm64 (imm12_from_negated_u64 n)))))
;; Helper type to represent a value and an extend operation fused together.
;; The type is opaque to ISLE (an empty extern enum); its contents are only
;; reachable through the external constructors declared below.
(type ExtendedValue extern (enum))
;; Extractor that succeeds when a `Value` can be treated as a register plus
;; a foldable extend operation.
(decl extended_value_from_value (ExtendedValue) Value)
(extern extractor extended_value_from_value extended_value_from_value)
;; Constructors used to poke at the fields of an `ExtendedValue`.
(decl put_extended_in_reg (ExtendedValue) Reg)
(extern constructor put_extended_in_reg put_extended_in_reg)
(decl get_extended_op (ExtendedValue) ExtendOp)
(extern constructor get_extended_op get_extended_op)
;; Instruction creation helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Instruction creation helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Emit an instruction. ;; Emit an instruction.
@@ -1331,9 +1370,55 @@
(_ Unit (emit (MInst.AluRRImmLogic op dst src imm)))) (_ Unit (emit (MInst.AluRRImmLogic op dst src imm))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `orr64` instructions. ;; Helper for emitting `MInst.AluRRR` instructions.
(decl orr64 (Reg ImmLogic) Reg) (decl alu_rrr (ALUOp Reg Reg) Reg)
(rule (orr64 src imm) (alu_rr_imm_logic (ALUOp.Orr64) src imm)) (rule (alu_rrr op src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRR op dst src1 src2))))
(writable_reg_to_reg dst)))
;; Each emitter below allocates a fresh temporary register, emits a single
;; instruction writing it, and returns the temp as a read-only `Reg`.
;; Helper for emitting `MInst.VecRRR` instructions.
;; (the destination temp is allocated via `$I8X16`, i.e. in the 128-bit
;; vector register class)
(decl vec_rrr (VecALUOp Reg Reg VectorSize) Reg)
(rule (vec_rrr op src1 src2 size)
(let ((dst WritableReg (temp_writable_reg $I8X16))
(_ Unit (emit (MInst.VecRRR op dst src1 src2 size))))
(writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRImm12` instructions.
(decl alu_rr_imm12 (ALUOp Reg Imm12) Reg)
(rule (alu_rr_imm12 op src imm)
(let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRImm12 op dst src imm))))
(writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRRShift` instructions.
(decl alu_rrr_shift (ALUOp Reg Reg ShiftOpAndAmt) Reg)
(rule (alu_rrr_shift op src1 src2 shift)
(let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRRShift op dst src1 src2 shift))))
(writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRRExtend` instructions.
(decl alu_rrr_extend (ALUOp Reg Reg ExtendOp) Reg)
(rule (alu_rrr_extend op src1 src2 extend)
(let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRRExtend op dst src1 src2 extend))))
(writable_reg_to_reg dst)))
;; Same as `alu_rrr_extend`, but takes an `ExtendedValue` packed "pair" instead
;; of a `Reg` and an `ExtendOp`.
;; (unpacks the pair via `put_extended_in_reg`/`get_extended_op` and then
;; delegates to `alu_rrr_extend`)
(decl alu_rr_extend_reg (ALUOp Reg ExtendedValue) Reg)
(rule (alu_rr_extend_reg op src1 extended_reg)
(let ((src2 Reg (put_extended_in_reg extended_reg))
(extend ExtendOp (get_extended_op extended_reg)))
(alu_rrr_extend op src1 src2 extend)))
;; Helper for emitting `MInst.AluRRRR` instructions.
(decl alu_rrrr (ALUOp3 Reg Reg Reg) Reg)
(rule (alu_rrrr op src1 src2 src3)
(let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRRR op dst src1 src2 src3))))
(writable_reg_to_reg dst)))
;; Immediate value helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Immediate value helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -1349,7 +1434,7 @@
;; Weird logical-instruction immediate in ORI using zero register ;; Weird logical-instruction immediate in ORI using zero register
(rule (imm (integral_ty _ty) (imm_logic_from_u64 n)) (rule (imm (integral_ty _ty) (imm_logic_from_u64 n))
(orr64 (zero_reg) n)) (alu_rr_imm_logic (ALUOp.Orr64) (zero_reg) n))
(decl load_constant64_full (u64) Reg) (decl load_constant64_full (u64) Reg)
(extern constructor load_constant64_full load_constant64_full) (extern constructor load_constant64_full load_constant64_full)

View File

@@ -62,7 +62,7 @@ impl ShiftOpShiftImm {
} }
/// A shift operator with an amount, guaranteed to be within range. /// A shift operator with an amount, guaranteed to be within range.
#[derive(Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct ShiftOpAndAmt { pub struct ShiftOpAndAmt {
op: ShiftOp, op: ShiftOp,
shift: ShiftOpShiftImm, shift: ShiftOpShiftImm,

View File

@@ -272,7 +272,7 @@ impl UImm12Scaled {
/// A shifted immediate value in 'imm12' format: supports 12 bits, shifted /// A shifted immediate value in 'imm12' format: supports 12 bits, shifted
/// left by 0 or 12 places. /// left by 0 or 12 places.
#[derive(Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct Imm12 { pub struct Imm12 {
/// The immediate bits. /// The immediate bits.
pub bits: u16, pub bits: u16,

View File

@@ -21,3 +21,128 @@
(rule (lower (has_type ty (null))) (rule (lower (has_type ty (null)))
(value_reg (imm ty 0))) (value_reg (imm ty 0)))
;;;; Rules for `iadd` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; `i64` and smaller
;; NOTE(review): every special case below matches its pattern (constant,
;; extend, shift, mul) only in the *second* operand — confirm that operand
;; canonicalization or mirrored rules elsewhere cover the first-operand
;; forms (only the `madd` fusion matches both orders explicitly).
;; base case, simply adding things in registers
(rule (lower (has_type (fits_in_64 ty) (iadd x y)))
(value_reg (alu_rrr (iadd_op ty) (put_in_reg x) (put_in_reg y))))
;; special case for when one operand is an immediate that fits in 12 bits
(rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_value y))))
(value_reg (alu_rr_imm12 (iadd_op ty) (put_in_reg x) y)))
;; same as the previous special case, except we can switch the addition to a
;; subtraction if the negated immediate fits in 12 bits.
(rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_negated_value y))))
(value_reg (alu_rr_imm12 (isub_op ty) (put_in_reg x) y)))
;; special case for when we're adding an extended register where the extending
;; operation can get folded into the add itself.
(rule (lower (has_type (fits_in_64 ty) (iadd x (extended_value_from_value y))))
(value_reg (alu_rr_extend_reg (iadd_op ty) (put_in_reg x) y)))
;; special case for when we're adding the shift of a different
;; register by a constant amount and the shift can get folded into the add.
(rule (lower (has_type (fits_in_64 ty)
(iadd x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))))
(value_reg (alu_rrr_shift (iadd_op ty) (put_in_reg x) (put_in_reg y) amt)))
;; Fold an `iadd` and `imul` combination into a `madd` instruction
;; (both operand orders are matched since the mul may appear on either side)
(rule (lower (has_type (fits_in_64 ty) (iadd x (def_inst (imul y z)))))
(value_reg (alu_rrrr (madd_op ty) (put_in_reg y) (put_in_reg z) (put_in_reg x))))
(rule (lower (has_type (fits_in_64 ty) (iadd (def_inst (imul x y)) z)))
(value_reg (alu_rrrr (madd_op ty) (put_in_reg x) (put_in_reg y) (put_in_reg z))))
;; helper to use either a 32 or 64-bit add depending on the input type
(decl iadd_op (Type) ALUOp)
(rule (iadd_op (fits_in_32 _ty)) (ALUOp.Add32))
(rule (iadd_op _ty) (ALUOp.Add64))
;; helper to use either a 32 or 64-bit sub depending on the input type
(decl isub_op (Type) ALUOp)
(rule (isub_op (fits_in_32 _ty)) (ALUOp.Sub32))
(rule (isub_op _ty) (ALUOp.Sub64))
;; helper to use either a 32 or 64-bit madd depending on the input type
(decl madd_op (Type) ALUOp3)
(rule (madd_op (fits_in_32 _ty)) (ALUOp3.MAdd32))
(rule (madd_op _ty) (ALUOp3.MAdd64))
;; vectors
(rule (lower (has_type ty @ (multi_lane _ _) (iadd x y)))
(value_reg (vec_rrr (VecALUOp.Add) (put_in_reg x) (put_in_reg y) (vector_size ty))))
;; `i128`
(rule (lower (has_type $I128 (iadd x y)))
(let (
;; Get the high/low registers for `x`.
(x_regs ValueRegs (put_in_regs x))
(x_lo Reg (value_regs_get x_regs 0))
(x_hi Reg (value_regs_get x_regs 1))
;; Get the high/low registers for `y`.
(y_regs ValueRegs (put_in_regs y))
(y_lo Reg (value_regs_get y_regs 0))
(y_hi Reg (value_regs_get y_regs 1))
)
;; the actual addition is `adds` followed by `adc` which comprises the
;; low/high bits of the result
(value_regs
(alu_rrr (ALUOp.AddS64) x_lo y_lo)
(alu_rrr (ALUOp.Adc64) x_hi y_hi))))
;;;; Rules for `isub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; `i64` and smaller
;; (isub is not commutative, so the special cases below correctly match
;; their pattern only in the second — subtrahend — operand)
;; base case, simply subtracting things in registers
(rule (lower (has_type (fits_in_64 ty) (isub x y)))
(value_reg (alu_rrr (isub_op ty) (put_in_reg x) (put_in_reg y))))
;; special case for when one operand is an immediate that fits in 12 bits
(rule (lower (has_type (fits_in_64 ty) (isub x (imm12_from_value y))))
(value_reg (alu_rr_imm12 (isub_op ty) (put_in_reg x) y)))
;; same as the previous special case, except we can switch the subtraction to an
;; addition if the negated immediate fits in 12 bits.
(rule (lower (has_type (fits_in_64 ty) (isub x (imm12_from_negated_value y))))
(value_reg (alu_rr_imm12 (iadd_op ty) (put_in_reg x) y)))
;; special cases for when we're subtracting an extended register where the
;; extending operation can get folded into the sub itself.
(rule (lower (has_type (fits_in_64 ty) (isub x (extended_value_from_value y))))
(value_reg (alu_rr_extend_reg (isub_op ty) (put_in_reg x) y)))
;; finally a special case for when we're subtracting the shift of a different
;; register by a constant amount and the shift can get folded into the sub.
(rule (lower (has_type (fits_in_64 ty)
(isub x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))))
(value_reg (alu_rrr_shift (isub_op ty) (put_in_reg x) (put_in_reg y) amt)))
;; vectors
(rule (lower (has_type ty @ (multi_lane _ _) (isub x y)))
(value_reg (vec_rrr (VecALUOp.Sub) (put_in_reg x) (put_in_reg y) (vector_size ty))))
;; `i128`
(rule (lower (has_type $I128 (isub x y)))
(let (
;; Get the high/low registers for `x`.
(x_regs ValueRegs (put_in_regs x))
(x_lo Reg (value_regs_get x_regs 0))
(x_hi Reg (value_regs_get x_regs 1))
;; Get the high/low registers for `y`.
(y_regs ValueRegs (put_in_regs y))
(y_lo Reg (value_regs_get y_regs 0))
(y_hi Reg (value_regs_get y_regs 1))
)
;; the actual subtraction is `subs` followed by `sbc` which comprises
;; the low/high bits of the result
(value_regs
(alu_rrr (ALUOp.SubS64) x_lo y_lo)
(alu_rrr (ALUOp.Sbc64) x_hi y_hi))))

View File

@@ -10,7 +10,7 @@
use crate::ir::condcodes::{FloatCC, IntCC}; use crate::ir::condcodes::{FloatCC, IntCC};
use crate::ir::types::*; use crate::ir::types::*;
use crate::ir::Inst as IRInst; use crate::ir::Inst as IRInst;
use crate::ir::{Opcode, Type}; use crate::ir::{Opcode, Type, Value};
use crate::machinst::lower::*; use crate::machinst::lower::*;
use crate::machinst::*; use crate::machinst::*;
use crate::{CodegenError, CodegenResult}; use crate::{CodegenError, CodegenResult};
@@ -272,20 +272,20 @@ fn extend_reg<C: LowerCtx<I = Inst>>(
} }
/// Lowers an instruction input to multiple regs /// Lowers an instruction input to multiple regs
fn lower_input_to_regs<C: LowerCtx<I = Inst>>( fn lower_value_to_regs<C: LowerCtx<I = Inst>>(
ctx: &mut C, ctx: &mut C,
input: InsnInput, value: Value,
) -> (ValueRegs<Reg>, Type, bool) { ) -> (ValueRegs<Reg>, Type, bool) {
log::trace!("lower_input_to_regs: input {:?}", input); log::trace!("lower_value_to_regs: value {:?}", value);
let ty = ctx.input_ty(input.insn, input.input); let ty = ctx.value_ty(value);
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input); let inputs = ctx.get_value_as_source_or_const(value);
let is_const = inputs.constant.is_some(); let is_const = inputs.constant.is_some();
let in_regs = if let Some(c) = inputs.constant { let in_regs = if let Some(c) = inputs.constant {
// Generate constants fresh at each use to minimize long-range register pressure. // Generate constants fresh at each use to minimize long-range register pressure.
generate_constant(ctx, ty, c as u128) generate_constant(ctx, ty, c as u128)
} else { } else {
ctx.put_input_in_regs(input.insn, input.input) ctx.put_value_in_regs(value)
}; };
(in_regs, ty, is_const) (in_regs, ty, is_const)
@@ -301,7 +301,17 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
input: InsnInput, input: InsnInput,
narrow_mode: NarrowValueMode, narrow_mode: NarrowValueMode,
) -> Reg { ) -> Reg {
let (in_regs, ty, is_const) = lower_input_to_regs(ctx, input); let value = ctx.input_as_value(input.insn, input.input);
put_value_in_reg(ctx, value, narrow_mode)
}
/// Like above, only for values
fn put_value_in_reg<C: LowerCtx<I = Inst>>(
ctx: &mut C,
value: Value,
narrow_mode: NarrowValueMode,
) -> Reg {
let (in_regs, ty, is_const) = lower_value_to_regs(ctx, value);
let reg = in_regs let reg = in_regs
.only_reg() .only_reg()
.expect("Multi-register value not expected"); .expect("Multi-register value not expected");
@@ -314,7 +324,8 @@ pub(crate) fn put_input_in_regs<C: LowerCtx<I = Inst>>(
ctx: &mut C, ctx: &mut C,
input: InsnInput, input: InsnInput,
) -> ValueRegs<Reg> { ) -> ValueRegs<Reg> {
let (in_regs, _, _) = lower_input_to_regs(ctx, input); let value = ctx.input_as_value(input.insn, input.input);
let (in_regs, _, _) = lower_value_to_regs(ctx, value);
in_regs in_regs
} }
@@ -367,89 +378,94 @@ fn put_input_in_rse<C: LowerCtx<I = Inst>>(
input: InsnInput, input: InsnInput,
narrow_mode: NarrowValueMode, narrow_mode: NarrowValueMode,
) -> ResultRSE { ) -> ResultRSE {
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input); let value = ctx.input_as_value(input.insn, input.input);
if let Some((insn, 0)) = inputs.inst { if let Some((val, extendop)) = get_as_extended_value(ctx, value, narrow_mode) {
let op = ctx.data(insn).opcode(); let reg = put_value_in_reg(ctx, val, NarrowValueMode::None);
let out_ty = ctx.output_ty(insn, 0); return ResultRSE::RegExtend(reg, extendop);
let out_bits = ty_bits(out_ty);
// Is this a zero-extend or sign-extend and can we handle that with a register-mode operator?
if op == Opcode::Uextend || op == Opcode::Sextend {
let sign_extend = op == Opcode::Sextend;
let inner_ty = ctx.input_ty(insn, 0);
let inner_bits = ty_bits(inner_ty);
assert!(inner_bits < out_bits);
if match (sign_extend, narrow_mode) {
// A single zero-extend or sign-extend is equal to itself.
(_, NarrowValueMode::None) => true,
// Two zero-extends or sign-extends in a row is equal to a single zero-extend or sign-extend.
(false, NarrowValueMode::ZeroExtend32) | (false, NarrowValueMode::ZeroExtend64) => {
true
}
(true, NarrowValueMode::SignExtend32) | (true, NarrowValueMode::SignExtend64) => {
true
}
// A zero-extend and a sign-extend in a row is not equal to a single zero-extend or sign-extend
(false, NarrowValueMode::SignExtend32) | (false, NarrowValueMode::SignExtend64) => {
false
}
(true, NarrowValueMode::ZeroExtend32) | (true, NarrowValueMode::ZeroExtend64) => {
false
}
} {
let extendop = match (sign_extend, inner_bits) {
(true, 8) => ExtendOp::SXTB,
(false, 8) => ExtendOp::UXTB,
(true, 16) => ExtendOp::SXTH,
(false, 16) => ExtendOp::UXTH,
(true, 32) => ExtendOp::SXTW,
(false, 32) => ExtendOp::UXTW,
_ => unreachable!(),
};
let reg =
put_input_in_reg(ctx, InsnInput { insn, input: 0 }, NarrowValueMode::None);
return ResultRSE::RegExtend(reg, extendop);
}
}
// If `out_ty` is smaller than 32 bits and we need to zero- or sign-extend,
// then get the result into a register and return an Extend-mode operand on
// that register.
if narrow_mode != NarrowValueMode::None
&& ((narrow_mode.is_32bit() && out_bits < 32)
|| (!narrow_mode.is_32bit() && out_bits < 64))
{
let reg = put_input_in_reg(ctx, input, NarrowValueMode::None);
let extendop = match (narrow_mode, out_bits) {
(NarrowValueMode::SignExtend32, 1) | (NarrowValueMode::SignExtend64, 1) => {
ExtendOp::SXTB
}
(NarrowValueMode::ZeroExtend32, 1) | (NarrowValueMode::ZeroExtend64, 1) => {
ExtendOp::UXTB
}
(NarrowValueMode::SignExtend32, 8) | (NarrowValueMode::SignExtend64, 8) => {
ExtendOp::SXTB
}
(NarrowValueMode::ZeroExtend32, 8) | (NarrowValueMode::ZeroExtend64, 8) => {
ExtendOp::UXTB
}
(NarrowValueMode::SignExtend32, 16) | (NarrowValueMode::SignExtend64, 16) => {
ExtendOp::SXTH
}
(NarrowValueMode::ZeroExtend32, 16) | (NarrowValueMode::ZeroExtend64, 16) => {
ExtendOp::UXTH
}
(NarrowValueMode::SignExtend64, 32) => ExtendOp::SXTW,
(NarrowValueMode::ZeroExtend64, 32) => ExtendOp::UXTW,
_ => unreachable!(),
};
return ResultRSE::RegExtend(reg, extendop);
}
} }
ResultRSE::from_rs(put_input_in_rs(ctx, input, narrow_mode)) ResultRSE::from_rs(put_input_in_rs(ctx, input, narrow_mode))
} }
/// Attempts to interpret `val` as something that can be consumed as a
/// register plus a foldable `ExtendOp`.
///
/// Returns `Some((value, extendop))` in either of two situations:
/// * `val` is produced by a `uextend`/`sextend` whose extension is
///   compatible with `narrow_mode` — the returned value is the *inner*,
///   pre-extension value; or
/// * `val`'s type is narrower than the width `narrow_mode` requires — then
///   `val` itself is returned along with the extend op needed to widen it.
///
/// Returns `None` when no extend can be folded into the consumer.
fn get_as_extended_value<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    val: Value,
    narrow_mode: NarrowValueMode,
) -> Option<(Value, ExtendOp)> {
    let inputs = ctx.get_value_as_source_or_const(val);
    // Only consider values that are the first result of some instruction.
    let (insn, n) = inputs.inst?;
    if n != 0 {
        return None;
    }
    let op = ctx.data(insn).opcode();
    let out_ty = ctx.output_ty(insn, 0);
    let out_bits = ty_bits(out_ty);

    // Is this a zero-extend or sign-extend and can we handle that with a register-mode operator?
    if op == Opcode::Uextend || op == Opcode::Sextend {
        let sign_extend = op == Opcode::Sextend;
        let inner_ty = ctx.input_ty(insn, 0);
        let inner_bits = ty_bits(inner_ty);
        assert!(inner_bits < out_bits);
        if match (sign_extend, narrow_mode) {
            // A single zero-extend or sign-extend is equal to itself.
            (_, NarrowValueMode::None) => true,
            // Two zero-extends or sign-extends in a row is equal to a single zero-extend or sign-extend.
            (false, NarrowValueMode::ZeroExtend32) | (false, NarrowValueMode::ZeroExtend64) => true,
            (true, NarrowValueMode::SignExtend32) | (true, NarrowValueMode::SignExtend64) => true,
            // A zero-extend and a sign-extend in a row is not equal to a single zero-extend or sign-extend
            (false, NarrowValueMode::SignExtend32) | (false, NarrowValueMode::SignExtend64) => {
                false
            }
            (true, NarrowValueMode::ZeroExtend32) | (true, NarrowValueMode::ZeroExtend64) => false,
        } {
            let extendop = match (sign_extend, inner_bits) {
                (true, 8) => ExtendOp::SXTB,
                (false, 8) => ExtendOp::UXTB,
                (true, 16) => ExtendOp::SXTH,
                (false, 16) => ExtendOp::UXTH,
                (true, 32) => ExtendOp::SXTW,
                (false, 32) => ExtendOp::UXTW,
                _ => unreachable!(),
            };
            // Return the extend's *input* value; the extension itself is
            // subsumed by `extendop`.
            return Some((ctx.input_as_value(insn, 0), extendop));
        }
    }

    // If `out_ty` is smaller than 32 bits and we need to zero- or sign-extend,
    // then get the result into a register and return an Extend-mode operand on
    // that register.
    if narrow_mode != NarrowValueMode::None
        && ((narrow_mode.is_32bit() && out_bits < 32) || (!narrow_mode.is_32bit() && out_bits < 64))
    {
        let extendop = match (narrow_mode, out_bits) {
            (NarrowValueMode::SignExtend32, 1) | (NarrowValueMode::SignExtend64, 1) => {
                ExtendOp::SXTB
            }
            (NarrowValueMode::ZeroExtend32, 1) | (NarrowValueMode::ZeroExtend64, 1) => {
                ExtendOp::UXTB
            }
            (NarrowValueMode::SignExtend32, 8) | (NarrowValueMode::SignExtend64, 8) => {
                ExtendOp::SXTB
            }
            (NarrowValueMode::ZeroExtend32, 8) | (NarrowValueMode::ZeroExtend64, 8) => {
                ExtendOp::UXTB
            }
            (NarrowValueMode::SignExtend32, 16) | (NarrowValueMode::SignExtend64, 16) => {
                ExtendOp::SXTH
            }
            (NarrowValueMode::ZeroExtend32, 16) | (NarrowValueMode::ZeroExtend64, 16) => {
                ExtendOp::UXTH
            }
            (NarrowValueMode::SignExtend64, 32) => ExtendOp::SXTW,
            (NarrowValueMode::ZeroExtend64, 32) => ExtendOp::UXTW,
            _ => unreachable!(),
        };
        return Some((val, extendop));
    }
    None
}
pub(crate) fn put_input_in_rse_imm12<C: LowerCtx<I = Inst>>( pub(crate) fn put_input_in_rse_imm12<C: LowerCtx<I = Inst>>(
ctx: &mut C, ctx: &mut C,
input: InsnInput, input: InsnInput,
@@ -472,35 +488,6 @@ pub(crate) fn put_input_in_rse_imm12<C: LowerCtx<I = Inst>>(
ResultRSEImm12::from_rse(put_input_in_rse(ctx, input, narrow_mode)) ResultRSEImm12::from_rse(put_input_in_rse(ctx, input, narrow_mode))
} }
/// Like `put_input_in_rse_imm12` above, except is allowed to negate the
/// argument (assuming a two's-complement representation with the given bit
/// width) if this allows use of 12-bit immediate. Used to flip `add`s with
/// negative immediates to `sub`s (and vice-versa).
pub(crate) fn put_input_in_rse_imm12_maybe_negated<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
twos_complement_bits: usize,
narrow_mode: NarrowValueMode,
) -> (ResultRSEImm12, bool) {
assert!(twos_complement_bits <= 64);
if let Some(imm_value) = input_to_const(ctx, input) {
if let Some(i) = Imm12::maybe_from_u64(imm_value) {
return (ResultRSEImm12::Imm12(i), false);
}
let sign_extended =
((imm_value as i64) << (64 - twos_complement_bits)) >> (64 - twos_complement_bits);
let inverted = sign_extended.wrapping_neg();
if let Some(i) = Imm12::maybe_from_u64(inverted as u64) {
return (ResultRSEImm12::Imm12(i), true);
}
}
(
ResultRSEImm12::from_rse(put_input_in_rse(ctx, input, narrow_mode)),
false,
)
}
pub(crate) fn put_input_in_rs_immlogic<C: LowerCtx<I = Inst>>( pub(crate) fn put_input_in_rs_immlogic<C: LowerCtx<I = Inst>>(
ctx: &mut C, ctx: &mut C,
input: InsnInput, input: InsnInput,

View File

@@ -7,8 +7,8 @@ pub mod generated_code;
use super::{ use super::{
zero_reg, AMode, ASIMDFPModImm, ASIMDMovModImm, AtomicRmwOp, BranchTarget, CallIndInfo, zero_reg, AMode, ASIMDFPModImm, ASIMDMovModImm, AtomicRmwOp, BranchTarget, CallIndInfo,
CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI, Imm12, ImmLogic, ImmShift, Inst as MInst, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI, Imm12, ImmLogic, ImmShift, Inst as MInst,
JTSequenceInfo, MachLabel, MoveWideConst, Opcode, OperandSize, PairAMode, Reg, ScalarSize, JTSequenceInfo, MachLabel, MoveWideConst, NarrowValueMode, Opcode, OperandSize, PairAMode, Reg,
ShiftOpAndAmt, UImm5, VectorSize, NZCV, ScalarSize, ShiftOpAndAmt, UImm5, VectorSize, NZCV,
}; };
use crate::isa::aarch64::settings as aarch64_settings; use crate::isa::aarch64::settings as aarch64_settings;
use crate::machinst::isle::*; use crate::machinst::isle::*;
@@ -19,8 +19,9 @@ use crate::{
ValueLabel, ValueList, ValueLabel, ValueList,
}, },
isa::aarch64::inst::aarch64_map_regs, isa::aarch64::inst::aarch64_map_regs,
isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
isa::unwind::UnwindInst, isa::unwind::UnwindInst,
machinst::{get_output_reg, InsnOutput, LowerCtx, RegRenamer}, machinst::{get_output_reg, ty_bits, InsnOutput, LowerCtx, RegRenamer},
}; };
use smallvec::SmallVec; use smallvec::SmallVec;
use std::boxed::Box; use std::boxed::Box;
@@ -76,6 +77,11 @@ pub struct IsleContext<'a, C> {
emitted_insts: SmallVec<[MInst; 6]>, emitted_insts: SmallVec<[MInst; 6]>,
} }
/// A `Value` paired with the extend operation that lowering can fold into
/// the consuming instruction.
///
/// Built by `extended_value_from_value`; its pieces are read back out via
/// `put_extended_in_reg` and `get_extended_op`.
pub struct ExtendedValue {
    // the (possibly inner, pre-extension) value to place in a register
    val: Value,
    // how that value must be extended when it is used
    extend: ExtendOp,
}
impl<'a, C> IsleContext<'a, C> { impl<'a, C> IsleContext<'a, C> {
pub fn new(lower_ctx: &'a mut C, isa_flags: &'a aarch64_settings::Flags) -> Self { pub fn new(lower_ctx: &'a mut C, isa_flags: &'a aarch64_settings::Flags) -> Self {
IsleContext { IsleContext {
@@ -108,6 +114,25 @@ where
ImmLogic::maybe_from_u64(n, I64) ImmLogic::maybe_from_u64(n, I64)
} }
/// Succeeds when `n` fits the aarch64 12-bit immediate encoding
/// (12 bits, shifted left by 0 or 12 — see `Imm12`).
fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
    Imm12::maybe_from_u64(n)
}

/// Succeeds when the two's-complement negation of `n` fits the 12-bit
/// immediate encoding; used to flip `add`s to `sub`s and vice versa.
fn imm12_from_negated_u64(&mut self, n: u64) -> Option<Imm12> {
    Imm12::maybe_from_u64((n as i64).wrapping_neg() as u64)
}
/// Interprets `n` as a left-shift (LSL) amount for a value of type `ty`.
///
/// Returns `None` when the raw amount is rejected by
/// `ShiftOpShiftImm::maybe_from_shift` or when `ty` is wider than
/// `u8::MAX` bits (so the mask below could not be represented).
fn lshl_from_imm64(&mut self, n: Imm64, ty: Type) -> Option<ShiftOpAndAmt> {
    let shiftimm = ShiftOpShiftImm::maybe_from_shift(n.bits() as u64)?;
    let shiftee_bits = ty_bits(ty);
    if shiftee_bits <= std::u8::MAX as usize {
        // Mask the shift amount to the width of the value being shifted
        // (see `ShiftOpShiftImm::mask`).
        let shiftimm = shiftimm.mask(shiftee_bits as u8);
        Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
    } else {
        None
    }
}
fn integral_ty(&mut self, ty: Type) -> Option<Type> { fn integral_ty(&mut self, ty: Type) -> Option<Type> {
match ty { match ty {
I8 | I16 | I32 | I64 | R64 => Some(ty), I8 | I16 | I32 | I64 | R64 => Some(ty),
@@ -181,6 +206,20 @@ where
zero_reg() zero_reg()
} }
/// ISLE extractor glue: attempts to view `val` as a register plus a
/// foldable extend via `get_as_extended_value` (with no narrowing
/// requested).
fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
    let (val, extend) =
        super::get_as_extended_value(self.lower_ctx, val, NarrowValueMode::None)?;
    Some(ExtendedValue { val, extend })
}

/// Places the inner value of an `ExtendedValue` into a register.
fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
    self.put_in_reg(reg.val)
}

/// Returns the extend operation recorded in an `ExtendedValue`.
fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
    reg.extend
}
fn emit(&mut self, inst: &MInst) -> Unit { fn emit(&mut self, inst: &MInst) -> Unit {
self.emitted_insts.push(inst.clone()); self.emitted_insts.push(inst.clone());
} }

View File

@@ -1,4 +1,10 @@
src/clif.isle 9c0563583e5500de00ec5e226edc0547ac3ea789c8d76f1da0401c80ec619320fdc9a6f17fd76bbcac74a5894f85385c1f51c900c2b83bc9906d03d0f29bf5cb src/clif.isle 9c0563583e5500de00ec5e226edc0547ac3ea789c8d76f1da0401c80ec619320fdc9a6f17fd76bbcac74a5894f85385c1f51c900c2b83bc9906d03d0f29bf5cb
<<<<<<< HEAD
src/prelude.isle c1391bcd436c23caf46b909ba7b5a352405014f0c393e3886cf1b9ad37f610b0563e8a64daad215f107395e6bb55744d955dd9c6344bb19b96587c2deb703462 src/prelude.isle c1391bcd436c23caf46b909ba7b5a352405014f0c393e3886cf1b9ad37f610b0563e8a64daad215f107395e6bb55744d955dd9c6344bb19b96587c2deb703462
src/isa/aarch64/inst.isle 841748c9c5900821b7086a09a41c6dcdb2172eb47a45293b6ef10f2e1f1389620bf6a2c75152af807d8bc8929029a357af5191f5d87bac2c9ec54bf63a9a2a8f src/isa/aarch64/inst.isle 841748c9c5900821b7086a09a41c6dcdb2172eb47a45293b6ef10f2e1f1389620bf6a2c75152af807d8bc8929029a357af5191f5d87bac2c9ec54bf63a9a2a8f
src/isa/aarch64/lower.isle b3cd0834484e543f39d477d47ee66042276e99955c21fb8c9340a5f27ac317936acb2907a30f758bf596066e36db801a179fda6dbcecaee758a0187a5a5f1412 src/isa/aarch64/lower.isle b3cd0834484e543f39d477d47ee66042276e99955c21fb8c9340a5f27ac317936acb2907a30f758bf596066e36db801a179fda6dbcecaee758a0187a5a5f1412
=======
src/prelude.isle bed4d567d548a0df8c975ac88275b75547494cf761c63b9775d117396382eb1302777de53889804ddc5dee96d04a6764cff59d43a4d4a614fb50f95dc1e44749
src/isa/aarch64/inst.isle 48d55da36f739bca3a748ee39a1c52a87ae8e0b4900e0e05e90f5f79174ffee1a913a85f258b73122e9e31c5d8a5d96d6ae4baae9f7eb40e4f0b47b37a3bc875
src/isa/aarch64/lower.isle 25146b36406baa91f81b44b0f54a5948e688af72946f0b70495c22396e275f60d038c52632e5a9fa173cbbd2b901959721679e95f8f6177ec89a7ec04b5f2a30
>>>>>>> aarch64: Migrate `iadd` and `isub` to ISLE

View File

@@ -31,6 +31,7 @@ pub trait Context {
fn u16_as_u64(&mut self, arg0: u16) -> u64; fn u16_as_u64(&mut self, arg0: u16) -> u64;
fn u32_as_u64(&mut self, arg0: u32) -> u64; fn u32_as_u64(&mut self, arg0: u32) -> u64;
fn ty_bits(&mut self, arg0: Type) -> u16; fn ty_bits(&mut self, arg0: Type) -> u16;
fn fits_in_32(&mut self, arg0: Type) -> Option<Type>;
fn fits_in_64(&mut self, arg0: Type) -> Option<Type>; fn fits_in_64(&mut self, arg0: Type) -> Option<Type>;
fn value_list_slice(&mut self, arg0: ValueList) -> ValueSlice; fn value_list_slice(&mut self, arg0: ValueList) -> ValueSlice;
fn unwrap_head_value_list_1(&mut self, arg0: ValueList) -> (Value, ValueSlice); fn unwrap_head_value_list_1(&mut self, arg0: ValueList) -> (Value, ValueSlice);
@@ -49,7 +50,13 @@ pub trait Context {
fn move_wide_const_from_u64(&mut self, arg0: u64) -> Option<MoveWideConst>; fn move_wide_const_from_u64(&mut self, arg0: u64) -> Option<MoveWideConst>;
fn move_wide_const_from_negated_u64(&mut self, arg0: u64) -> Option<MoveWideConst>; fn move_wide_const_from_negated_u64(&mut self, arg0: u64) -> Option<MoveWideConst>;
fn imm_logic_from_u64(&mut self, arg0: u64) -> Option<ImmLogic>; fn imm_logic_from_u64(&mut self, arg0: u64) -> Option<ImmLogic>;
fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12>;
fn imm12_from_negated_u64(&mut self, arg0: u64) -> Option<Imm12>;
fn lshl_from_imm64(&mut self, arg0: Imm64, arg1: Type) -> Option<ShiftOpAndAmt>;
fn integral_ty(&mut self, arg0: Type) -> Option<Type>; fn integral_ty(&mut self, arg0: Type) -> Option<Type>;
fn extended_value_from_value(&mut self, arg0: Value) -> Option<ExtendedValue>;
fn put_extended_in_reg(&mut self, arg0: &ExtendedValue) -> Reg;
fn get_extended_op(&mut self, arg0: &ExtendedValue) -> ExtendOp;
fn emit(&mut self, arg0: &MInst) -> Unit; fn emit(&mut self, arg0: &MInst) -> Unit;
fn zero_reg(&mut self) -> Reg; fn zero_reg(&mut self) -> Reg;
fn load_constant64_full(&mut self, arg0: u64) -> Reg; fn load_constant64_full(&mut self, arg0: u64) -> Reg;
@@ -685,7 +692,7 @@ pub enum BitOp {
Cls64, Cls64,
} }
/// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 951. /// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 958.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum FPUOp1 { pub enum FPUOp1 {
Abs32, Abs32,
@@ -698,7 +705,7 @@ pub enum FPUOp1 {
Cvt64To32, Cvt64To32,
} }
/// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 964. /// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 971.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum FPUOp2 { pub enum FPUOp2 {
Add32, Add32,
@@ -719,14 +726,14 @@ pub enum FPUOp2 {
Uqsub64, Uqsub64,
} }
/// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 989. /// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 996.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum FPUOp3 { pub enum FPUOp3 {
MAdd32, MAdd32,
MAdd64, MAdd64,
} }
/// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 996. /// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 1003.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum FpuToIntOp { pub enum FpuToIntOp {
F32ToU32, F32ToU32,
@@ -739,7 +746,7 @@ pub enum FpuToIntOp {
F64ToI64, F64ToI64,
} }
/// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1009. /// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1016.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum IntToFpuOp { pub enum IntToFpuOp {
U32ToF32, U32ToF32,
@@ -752,7 +759,7 @@ pub enum IntToFpuOp {
I64ToF64, I64ToF64,
} }
/// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1023. /// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1030.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum FpuRoundMode { pub enum FpuRoundMode {
Minus32, Minus32,
@@ -765,7 +772,7 @@ pub enum FpuRoundMode {
Nearest64, Nearest64,
} }
/// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1036. /// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1043.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecExtendOp { pub enum VecExtendOp {
Sxtl8, Sxtl8,
@@ -776,7 +783,7 @@ pub enum VecExtendOp {
Uxtl32, Uxtl32,
} }
/// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1053. /// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1060.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecALUOp { pub enum VecALUOp {
Sqadd, Sqadd,
@@ -818,7 +825,7 @@ pub enum VecALUOp {
Sqrdmulh, Sqrdmulh,
} }
/// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1132. /// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1139.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecMisc2 { pub enum VecMisc2 {
Not, Not,
@@ -840,7 +847,7 @@ pub enum VecMisc2 {
Cmeq0, Cmeq0,
} }
/// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1171. /// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1178.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecRRLongOp { pub enum VecRRLongOp {
Fcvtl16, Fcvtl16,
@@ -850,7 +857,7 @@ pub enum VecRRLongOp {
Shll32, Shll32,
} }
/// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1186. /// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1193.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecRRNarrowOp { pub enum VecRRNarrowOp {
Xtn16, Xtn16,
@@ -869,7 +876,7 @@ pub enum VecRRNarrowOp {
Fcvtn64, Fcvtn64,
} }
/// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1218. /// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1225.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecRRRLongOp { pub enum VecRRRLongOp {
Smull8, Smull8,
@@ -883,13 +890,13 @@ pub enum VecRRRLongOp {
Umlal32, Umlal32,
} }
/// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1235. /// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1242.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecPairOp { pub enum VecPairOp {
Addp, Addp,
} }
/// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1243. /// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1250.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecRRPairLongOp { pub enum VecRRPairLongOp {
Saddlp8, Saddlp8,
@@ -898,14 +905,14 @@ pub enum VecRRPairLongOp {
Uaddlp16, Uaddlp16,
} }
/// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1254. /// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1261.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecLanesOp { pub enum VecLanesOp {
Addv, Addv,
Uminv, Uminv,
} }
/// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1263. /// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1270.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VecShiftImmOp { pub enum VecShiftImmOp {
Shl, Shl,
@@ -913,7 +920,7 @@ pub enum VecShiftImmOp {
Sshr, Sshr,
} }
/// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1274. /// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1281.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AtomicRMWOp { pub enum AtomicRMWOp {
Add, Add,
@@ -945,6 +952,42 @@ pub fn constructor_lo_reg<C: Context>(ctx: &mut C, arg0: Value) -> Option<Reg> {
return Some(expr2_0); return Some(expr2_0);
} }
// Generated as internal constructor for term vector_size.
pub fn constructor_vector_size<C: Context>(ctx: &mut C, arg0: Type) -> Option<VectorSize> {
let pattern0_0 = arg0;
if let Some((pattern1_0, pattern1_1)) = C::multi_lane(ctx, pattern0_0) {
if pattern1_0 == 8 {
if pattern1_1 == 16 {
// Rule at src/isa/aarch64/inst.isle line 952.
let expr0_0 = VectorSize::Size8x16;
return Some(expr0_0);
}
}
if pattern1_0 == 16 {
if pattern1_1 == 8 {
// Rule at src/isa/aarch64/inst.isle line 953.
let expr0_0 = VectorSize::Size16x8;
return Some(expr0_0);
}
}
if pattern1_0 == 32 {
if pattern1_1 == 4 {
// Rule at src/isa/aarch64/inst.isle line 954.
let expr0_0 = VectorSize::Size32x4;
return Some(expr0_0);
}
}
if pattern1_0 == 64 {
if pattern1_1 == 2 {
// Rule at src/isa/aarch64/inst.isle line 955.
let expr0_0 = VectorSize::Size64x2;
return Some(expr0_0);
}
}
}
return None;
}
// Generated as internal constructor for term movz. // Generated as internal constructor for term movz.
pub fn constructor_movz<C: Context>( pub fn constructor_movz<C: Context>(
ctx: &mut C, ctx: &mut C,
@@ -953,7 +996,7 @@ pub fn constructor_movz<C: Context>(
) -> Option<Reg> { ) -> Option<Reg> {
let pattern0_0 = arg0; let pattern0_0 = arg0;
let pattern1_0 = arg1; let pattern1_0 = arg1;
// Rule at src/isa/aarch64/inst.isle line 1315. // Rule at src/isa/aarch64/inst.isle line 1354.
let expr0_0: Type = I64; let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::MovZ { let expr2_0 = MInst::MovZ {
@@ -974,7 +1017,7 @@ pub fn constructor_movn<C: Context>(
) -> Option<Reg> { ) -> Option<Reg> {
let pattern0_0 = arg0; let pattern0_0 = arg0;
let pattern1_0 = arg1; let pattern1_0 = arg1;
// Rule at src/isa/aarch64/inst.isle line 1322. // Rule at src/isa/aarch64/inst.isle line 1361.
let expr0_0: Type = I64; let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::MovN { let expr2_0 = MInst::MovN {
@@ -997,7 +1040,7 @@ pub fn constructor_alu_rr_imm_logic<C: Context>(
let pattern0_0 = arg0; let pattern0_0 = arg0;
let pattern1_0 = arg1; let pattern1_0 = arg1;
let pattern2_0 = arg2; let pattern2_0 = arg2;
// Rule at src/isa/aarch64/inst.isle line 1329. // Rule at src/isa/aarch64/inst.isle line 1368.
let expr0_0: Type = I64; let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRImmLogic { let expr2_0 = MInst::AluRRImmLogic {
@@ -1011,14 +1054,177 @@ pub fn constructor_alu_rr_imm_logic<C: Context>(
return Some(expr4_0); return Some(expr4_0);
} }
// Generated as internal constructor for term orr64. // Generated as internal constructor for term alu_rrr.
pub fn constructor_orr64<C: Context>(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option<Reg> { pub fn constructor_alu_rrr<C: Context>(
ctx: &mut C,
arg0: &ALUOp,
arg1: Reg,
arg2: Reg,
) -> Option<Reg> {
let pattern0_0 = arg0; let pattern0_0 = arg0;
let pattern1_0 = arg1; let pattern1_0 = arg1;
// Rule at src/isa/aarch64/inst.isle line 1336. let pattern2_0 = arg2;
let expr0_0 = ALUOp::Orr64; // Rule at src/isa/aarch64/inst.isle line 1375.
let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; let expr0_0: Type = I64;
return Some(expr1_0); let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRR {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
rm: pattern2_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term vec_rrr.
pub fn constructor_vec_rrr<C: Context>(
ctx: &mut C,
arg0: &VecALUOp,
arg1: Reg,
arg2: Reg,
arg3: &VectorSize,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1382.
let expr0_0: Type = I8X16;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::VecRRR {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
rm: pattern2_0,
size: pattern3_0.clone(),
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term alu_rr_imm12.
pub fn constructor_alu_rr_imm12<C: Context>(
ctx: &mut C,
arg0: &ALUOp,
arg1: Reg,
arg2: Imm12,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
// Rule at src/isa/aarch64/inst.isle line 1389.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRImm12 {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
imm12: pattern2_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term alu_rrr_shift.
pub fn constructor_alu_rrr_shift<C: Context>(
ctx: &mut C,
arg0: &ALUOp,
arg1: Reg,
arg2: Reg,
arg3: ShiftOpAndAmt,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1396.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRRShift {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
rm: pattern2_0,
shiftop: pattern3_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term alu_rrr_extend.
pub fn constructor_alu_rrr_extend<C: Context>(
ctx: &mut C,
arg0: &ALUOp,
arg1: Reg,
arg2: Reg,
arg3: &ExtendOp,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1403.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRRExtend {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
rm: pattern2_0,
extendop: pattern3_0.clone(),
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term alu_rr_extend_reg.
pub fn constructor_alu_rr_extend_reg<C: Context>(
ctx: &mut C,
arg0: &ALUOp,
arg1: Reg,
arg2: &ExtendedValue,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
// Rule at src/isa/aarch64/inst.isle line 1409.
let expr0_0 = C::put_extended_in_reg(ctx, pattern2_0);
let expr1_0 = C::get_extended_op(ctx, pattern2_0);
let expr2_0 = constructor_alu_rrr_extend(ctx, pattern0_0, pattern1_0, expr0_0, &expr1_0)?;
return Some(expr2_0);
}
// Generated as internal constructor for term alu_rrrr.
pub fn constructor_alu_rrrr<C: Context>(
ctx: &mut C,
arg0: &ALUOp3,
arg1: Reg,
arg2: Reg,
arg3: Reg,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1416.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::AluRRRR {
alu_op: pattern0_0.clone(),
rd: expr1_0,
rn: pattern1_0,
rm: pattern2_0,
ra: pattern3_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
} }
// Generated as internal constructor for term imm. // Generated as internal constructor for term imm.
@@ -1027,24 +1233,25 @@ pub fn constructor_imm<C: Context>(ctx: &mut C, arg0: Type, arg1: u64) -> Option
if let Some(pattern1_0) = C::integral_ty(ctx, pattern0_0) { if let Some(pattern1_0) = C::integral_ty(ctx, pattern0_0) {
let pattern2_0 = arg1; let pattern2_0 = arg1;
if let Some(pattern3_0) = C::imm_logic_from_u64(ctx, pattern2_0) { if let Some(pattern3_0) = C::imm_logic_from_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1351. // Rule at src/isa/aarch64/inst.isle line 1434.
let expr0_0 = C::zero_reg(ctx); let expr0_0 = ALUOp::Orr64;
let expr1_0 = constructor_orr64(ctx, expr0_0, pattern3_0)?; let expr1_0 = C::zero_reg(ctx);
return Some(expr1_0); let expr2_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, expr1_0, pattern3_0)?;
return Some(expr2_0);
} }
if let Some(pattern3_0) = C::move_wide_const_from_u64(ctx, pattern2_0) { if let Some(pattern3_0) = C::move_wide_const_from_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1343. // Rule at src/isa/aarch64/inst.isle line 1426.
let expr0_0 = OperandSize::Size64; let expr0_0 = OperandSize::Size64;
let expr1_0 = constructor_movz(ctx, pattern3_0, &expr0_0)?; let expr1_0 = constructor_movz(ctx, pattern3_0, &expr0_0)?;
return Some(expr1_0); return Some(expr1_0);
} }
if let Some(pattern3_0) = C::move_wide_const_from_negated_u64(ctx, pattern2_0) { if let Some(pattern3_0) = C::move_wide_const_from_negated_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1347. // Rule at src/isa/aarch64/inst.isle line 1430.
let expr0_0 = OperandSize::Size64; let expr0_0 = OperandSize::Size64;
let expr1_0 = constructor_movn(ctx, pattern3_0, &expr0_0)?; let expr1_0 = constructor_movn(ctx, pattern3_0, &expr0_0)?;
return Some(expr1_0); return Some(expr1_0);
} }
// Rule at src/isa/aarch64/inst.isle line 1358. // Rule at src/isa/aarch64/inst.isle line 1441.
let expr0_0 = C::load_constant64_full(ctx, pattern2_0); let expr0_0 = C::load_constant64_full(ctx, pattern2_0);
return Some(expr0_0); return Some(expr0_0);
} }
@@ -1056,6 +1263,58 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueReg
let pattern0_0 = arg0; let pattern0_0 = arg0;
if let Some(pattern1_0) = C::first_result(ctx, pattern0_0) { if let Some(pattern1_0) = C::first_result(ctx, pattern0_0) {
let pattern2_0 = C::value_type(ctx, pattern1_0); let pattern2_0 = C::value_type(ctx, pattern1_0);
if pattern2_0 == I128 {
let pattern4_0 = C::inst_data(ctx, pattern0_0);
if let &InstructionData::Binary {
opcode: ref pattern5_0,
args: ref pattern5_1,
} = &pattern4_0
{
match &pattern5_0 {
&Opcode::Iadd => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 81.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = ALUOp::AddS64;
let expr11_0 = constructor_alu_rrr(ctx, &expr10_0, expr2_0, expr7_0)?;
let expr12_0 = ALUOp::Adc64;
let expr13_0 = constructor_alu_rrr(ctx, &expr12_0, expr4_0, expr9_0)?;
let expr14_0 = C::value_regs(ctx, expr11_0, expr13_0);
return Some(expr14_0);
}
&Opcode::Isub => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 132.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = ALUOp::SubS64;
let expr11_0 = constructor_alu_rrr(ctx, &expr10_0, expr2_0, expr7_0)?;
let expr12_0 = ALUOp::Sbc64;
let expr13_0 = constructor_alu_rrr(ctx, &expr12_0, expr4_0, expr9_0)?;
let expr14_0 = C::value_regs(ctx, expr11_0, expr13_0);
return Some(expr14_0);
}
_ => {}
}
}
}
let pattern3_0 = C::inst_data(ctx, pattern0_0); let pattern3_0 = C::inst_data(ctx, pattern0_0);
match &pattern3_0 { match &pattern3_0 {
&InstructionData::NullAry { &InstructionData::NullAry {
@@ -1104,6 +1363,364 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueReg
} }
_ => {} _ => {}
} }
if let Some((pattern3_0, pattern3_1)) = C::multi_lane(ctx, pattern2_0) {
let pattern4_0 = C::inst_data(ctx, pattern0_0);
if let &InstructionData::Binary {
opcode: ref pattern5_0,
args: ref pattern5_1,
} = &pattern4_0
{
match &pattern5_0 {
&Opcode::Iadd => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 77.
let expr0_0 = VecALUOp::Add;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = C::put_in_reg(ctx, pattern7_1);
let expr3_0 = constructor_vector_size(ctx, pattern2_0)?;
let expr4_0 =
constructor_vec_rrr(ctx, &expr0_0, expr1_0, expr2_0, &expr3_0)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
&Opcode::Isub => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 128.
let expr0_0 = VecALUOp::Sub;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = C::put_in_reg(ctx, pattern7_1);
let expr3_0 = constructor_vector_size(ctx, pattern2_0)?;
let expr4_0 =
constructor_vec_rrr(ctx, &expr0_0, expr1_0, expr2_0, &expr3_0)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
_ => {}
}
}
}
if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) {
let pattern4_0 = C::inst_data(ctx, pattern0_0);
if let &InstructionData::Binary {
opcode: ref pattern5_0,
args: ref pattern5_1,
} = &pattern4_0
{
match &pattern5_0 {
&Opcode::Iadd => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
if let Some(pattern8_0) = C::def_inst(ctx, pattern7_0) {
let pattern9_0 = C::inst_data(ctx, pattern8_0);
if let &InstructionData::Binary {
opcode: ref pattern10_0,
args: ref pattern10_1,
} = &pattern9_0
{
if let &Opcode::Imul = &pattern10_0 {
let (pattern12_0, pattern12_1) =
C::unpack_value_array_2(ctx, &pattern10_1);
// Rule at src/isa/aarch64/lower.isle line 57.
let expr0_0 = constructor_madd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern12_0);
let expr2_0 = C::put_in_reg(ctx, pattern12_1);
let expr3_0 = C::put_in_reg(ctx, pattern7_1);
let expr4_0 = constructor_alu_rrrr(
ctx, &expr0_0, expr1_0, expr2_0, expr3_0,
)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
}
}
if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) {
let pattern9_0 = C::inst_data(ctx, pattern8_0);
match &pattern9_0 {
&InstructionData::UnaryImm {
opcode: ref pattern10_0,
imm: pattern10_1,
} => {
if let &Opcode::Iconst = &pattern10_0 {
let pattern12_0 = C::u64_from_imm64(ctx, pattern10_1);
if let Some(pattern13_0) =
C::imm12_from_u64(ctx, pattern12_0)
{
// Rule at src/isa/aarch64/lower.isle line 34.
let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = constructor_alu_rr_imm12(
ctx,
&expr0_0,
expr1_0,
pattern13_0,
)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
if let Some(pattern13_0) =
C::imm12_from_negated_u64(ctx, pattern12_0)
{
// Rule at src/isa/aarch64/lower.isle line 39.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = constructor_alu_rr_imm12(
ctx,
&expr0_0,
expr1_0,
pattern13_0,
)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
}
}
&InstructionData::Binary {
opcode: ref pattern10_0,
args: ref pattern10_1,
} => {
match &pattern10_0 {
&Opcode::Imul => {
let (pattern12_0, pattern12_1) =
C::unpack_value_array_2(ctx, &pattern10_1);
// Rule at src/isa/aarch64/lower.isle line 54.
let expr0_0 = constructor_madd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern12_0);
let expr2_0 = C::put_in_reg(ctx, pattern12_1);
let expr3_0 = C::put_in_reg(ctx, pattern7_0);
let expr4_0 = constructor_alu_rrrr(
ctx, &expr0_0, expr1_0, expr2_0, expr3_0,
)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
&Opcode::Ishl => {
let (pattern12_0, pattern12_1) =
C::unpack_value_array_2(ctx, &pattern10_1);
if let Some(pattern13_0) = C::def_inst(ctx, pattern12_1)
{
let pattern14_0 = C::inst_data(ctx, pattern13_0);
if let &InstructionData::UnaryImm {
opcode: ref pattern15_0,
imm: pattern15_1,
} = &pattern14_0
{
if let &Opcode::Iconst = &pattern15_0 {
let closure17 = || {
return Some(pattern3_0);
};
if let Some(pattern17_0) = closure17() {
if let Some(pattern18_0) =
C::lshl_from_imm64(
ctx,
pattern15_1,
pattern17_0,
)
{
// Rule at src/isa/aarch64/lower.isle line 49.
let expr0_0 = constructor_iadd_op(
ctx, pattern3_0,
)?;
let expr1_0 =
C::put_in_reg(ctx, pattern7_0);
let expr2_0 =
C::put_in_reg(ctx, pattern12_0);
let expr3_0 =
constructor_alu_rrr_shift(
ctx,
&expr0_0,
expr1_0,
expr2_0,
pattern18_0,
)?;
let expr4_0 =
C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
}
}
}
}
}
_ => {}
}
}
_ => {}
}
}
if let Some(pattern8_0) = C::extended_value_from_value(ctx, pattern7_1) {
// Rule at src/isa/aarch64/lower.isle line 44.
let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 =
constructor_alu_rr_extend_reg(ctx, &expr0_0, expr1_0, &pattern8_0)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
// Rule at src/isa/aarch64/lower.isle line 30.
let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = C::put_in_reg(ctx, pattern7_1);
let expr3_0 = constructor_alu_rrr(ctx, &expr0_0, expr1_0, expr2_0)?;
let expr4_0 = C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
&Opcode::Isub => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) {
let pattern9_0 = C::inst_data(ctx, pattern8_0);
match &pattern9_0 {
&InstructionData::UnaryImm {
opcode: ref pattern10_0,
imm: pattern10_1,
} => {
if let &Opcode::Iconst = &pattern10_0 {
let pattern12_0 = C::u64_from_imm64(ctx, pattern10_1);
if let Some(pattern13_0) =
C::imm12_from_u64(ctx, pattern12_0)
{
// Rule at src/isa/aarch64/lower.isle line 108.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = constructor_alu_rr_imm12(
ctx,
&expr0_0,
expr1_0,
pattern13_0,
)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
if let Some(pattern13_0) =
C::imm12_from_negated_u64(ctx, pattern12_0)
{
// Rule at src/isa/aarch64/lower.isle line 113.
let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = constructor_alu_rr_imm12(
ctx,
&expr0_0,
expr1_0,
pattern13_0,
)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
}
}
&InstructionData::Binary {
opcode: ref pattern10_0,
args: ref pattern10_1,
} => {
if let &Opcode::Ishl = &pattern10_0 {
let (pattern12_0, pattern12_1) =
C::unpack_value_array_2(ctx, &pattern10_1);
if let Some(pattern13_0) = C::def_inst(ctx, pattern12_1) {
let pattern14_0 = C::inst_data(ctx, pattern13_0);
if let &InstructionData::UnaryImm {
opcode: ref pattern15_0,
imm: pattern15_1,
} = &pattern14_0
{
if let &Opcode::Iconst = &pattern15_0 {
let closure17 = || {
return Some(pattern3_0);
};
if let Some(pattern17_0) = closure17() {
if let Some(pattern18_0) =
C::lshl_from_imm64(
ctx,
pattern15_1,
pattern17_0,
)
{
// Rule at src/isa/aarch64/lower.isle line 123.
let expr0_0 = constructor_isub_op(
ctx, pattern3_0,
)?;
let expr1_0 =
C::put_in_reg(ctx, pattern7_0);
let expr2_0 =
C::put_in_reg(ctx, pattern12_0);
let expr3_0 =
constructor_alu_rrr_shift(
ctx,
&expr0_0,
expr1_0,
expr2_0,
pattern18_0,
)?;
let expr4_0 =
C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
}
}
}
}
}
}
_ => {}
}
}
if let Some(pattern8_0) = C::extended_value_from_value(ctx, pattern7_1) {
// Rule at src/isa/aarch64/lower.isle line 118.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 =
constructor_alu_rr_extend_reg(ctx, &expr0_0, expr1_0, &pattern8_0)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
// Rule at src/isa/aarch64/lower.isle line 104.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::put_in_reg(ctx, pattern7_0);
let expr2_0 = C::put_in_reg(ctx, pattern7_1);
let expr3_0 = constructor_alu_rrr(ctx, &expr0_0, expr1_0, expr2_0)?;
let expr4_0 = C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
_ => {}
}
}
}
} }
return None; return None;
} }
// Generated as internal constructor for term iadd_op.
pub fn constructor_iadd_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp> {
let pattern0_0 = arg0;
if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
// Rule at src/isa/aarch64/lower.isle line 62.
let expr0_0 = ALUOp::Add32;
return Some(expr0_0);
}
// Rule at src/isa/aarch64/lower.isle line 63.
let expr0_0 = ALUOp::Add64;
return Some(expr0_0);
}
// Generated as internal constructor for term isub_op.
pub fn constructor_isub_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp> {
let pattern0_0 = arg0;
if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
// Rule at src/isa/aarch64/lower.isle line 67.
let expr0_0 = ALUOp::Sub32;
return Some(expr0_0);
}
// Rule at src/isa/aarch64/lower.isle line 68.
let expr0_0 = ALUOp::Sub64;
return Some(expr0_0);
}
// Generated as internal constructor for term madd_op.
pub fn constructor_madd_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp3> {
let pattern0_0 = arg0;
if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
// Rule at src/isa/aarch64/lower.isle line 72.
let expr0_0 = ALUOp3::MAdd32;
return Some(expr0_0);
}
// Rule at src/isa/aarch64/lower.isle line 73.
let expr0_0 = ALUOp3::MAdd64;
return Some(expr0_0);
}

View File

@@ -42,7 +42,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
return Ok(()); return Ok(());
} }
let implemented_in_isle = || { let implemented_in_isle = |ctx: &mut C| {
unreachable!( unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`", "implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn), ctx.dfg().display_inst(insn),
@@ -51,7 +51,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}; };
match op { match op {
Opcode::Iconst | Opcode::Bconst | Opcode::Null => implemented_in_isle(), Opcode::Iconst | Opcode::Bconst | Opcode::Null => implemented_in_isle(ctx),
Opcode::F32const => { Opcode::F32const => {
let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32); let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32);
@@ -63,143 +63,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
lower_constant_f64(ctx, rd, value); lower_constant_f64(ctx, rd, value);
} }
Opcode::Iadd => { Opcode::Iadd => implemented_in_isle(ctx),
match ty.unwrap() { Opcode::Isub => implemented_in_isle(ctx),
ty if ty.is_vector() => {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
ctx.emit(Inst::VecRRR {
rd,
rn,
rm,
alu_op: VecALUOp::Add,
size: VectorSize::from_ty(ty),
});
}
I128 => {
let lhs = put_input_in_regs(ctx, inputs[0]);
let rhs = put_input_in_regs(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
assert_eq!(lhs.len(), 2);
assert_eq!(rhs.len(), 2);
assert_eq!(dst.len(), 2);
// adds x0, x0, x2
// adc x1, x1, x3
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::AddS64,
rd: dst.regs()[0],
rn: lhs.regs()[0],
rm: rhs.regs()[0],
});
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Adc64,
rd: dst.regs()[1],
rn: lhs.regs()[1],
rm: rhs.regs()[1],
});
}
ty => {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let mul_insn = if let Some(mul_insn) =
maybe_input_insn(ctx, inputs[1], Opcode::Imul)
{
Some((mul_insn, 0))
} else if let Some(mul_insn) = maybe_input_insn(ctx, inputs[0], Opcode::Imul) {
Some((mul_insn, 1))
} else {
None
};
// If possible combine mul + add into madd.
if let Some((insn, addend_idx)) = mul_insn {
let alu_op = choose_32_64(ty, ALUOp3::MAdd32, ALUOp3::MAdd64);
let rn_input = InsnInput { insn, input: 0 };
let rm_input = InsnInput { insn, input: 1 };
let rn = put_input_in_reg(ctx, rn_input, NarrowValueMode::None);
let rm = put_input_in_reg(ctx, rm_input, NarrowValueMode::None);
let ra = put_input_in_reg(ctx, inputs[addend_idx], NarrowValueMode::None);
ctx.emit(Inst::AluRRRR {
alu_op,
rd,
rn,
rm,
ra,
});
} else {
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let (rm, negated) = put_input_in_rse_imm12_maybe_negated(
ctx,
inputs[1],
ty_bits(ty),
NarrowValueMode::None,
);
let alu_op = if !negated {
choose_32_64(ty, ALUOp::Add32, ALUOp::Add64)
} else {
choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64)
};
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
}
}
}
}
Opcode::Isub => {
let ty = ty.unwrap();
if ty == I128 {
let lhs = put_input_in_regs(ctx, inputs[0]);
let rhs = put_input_in_regs(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
assert_eq!(lhs.len(), 2);
assert_eq!(rhs.len(), 2);
assert_eq!(dst.len(), 2);
// subs x0, x0, x2
// sbc x1, x1, x3
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::SubS64,
rd: dst.regs()[0],
rn: lhs.regs()[0],
rm: rhs.regs()[0],
});
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Sbc64,
rd: dst.regs()[1],
rn: lhs.regs()[1],
rm: rhs.regs()[1],
});
} else {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
if !ty.is_vector() {
let (rm, negated) = put_input_in_rse_imm12_maybe_negated(
ctx,
inputs[1],
ty_bits(ty),
NarrowValueMode::None,
);
let alu_op = if !negated {
choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64)
} else {
choose_32_64(ty, ALUOp::Add32, ALUOp::Add64)
};
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
} else {
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
ctx.emit(Inst::VecRRR {
rd,
rn,
rm,
alu_op: VecALUOp::Sub,
size: VectorSize::from_ty(ty),
});
}
}
}
Opcode::UaddSat | Opcode::SaddSat | Opcode::UsubSat | Opcode::SsubSat => { Opcode::UaddSat | Opcode::SaddSat | Opcode::UsubSat | Opcode::SsubSat => {
let ty = ty.unwrap(); let ty = ty.unwrap();

View File

@@ -1,4 +1,10 @@
src/clif.isle 9c0563583e5500de00ec5e226edc0547ac3ea789c8d76f1da0401c80ec619320fdc9a6f17fd76bbcac74a5894f85385c1f51c900c2b83bc9906d03d0f29bf5cb
src/prelude.isle bed4d567d548a0df8c975ac88275b75547494cf761c63b9775d117396382eb1302777de53889804ddc5dee96d04a6764cff59d43a4d4a614fb50f95dc1e44749
src/isa/x64/inst.isle fdfbfc6dfad1fc5ed252e0a14ccc69baba51d0538e05cfb9916f6213e5a6fcfc9d22605a29bd684d6a66f6d5e1c8ec36a963660d52c2e8b3fb6e0758f7adb7b5
src/isa/x64/lower.isle 8555abdae385431c96aaabc392b7b3a8b1bbe733be08b007ef776850860cb77e85a140db02f427586c155c0b0129f9ffd531abd2e4a772c72667535cc015e609

View File

@@ -31,6 +31,7 @@ pub trait Context {
fn u16_as_u64(&mut self, arg0: u16) -> u64;
fn u32_as_u64(&mut self, arg0: u32) -> u64;
fn ty_bits(&mut self, arg0: Type) -> u16;
fn fits_in_32(&mut self, arg0: Type) -> Option<Type>;
fn fits_in_64(&mut self, arg0: Type) -> Option<Type>;
fn value_list_slice(&mut self, arg0: ValueList) -> ValueSlice;
fn unwrap_head_value_list_1(&mut self, arg0: ValueList) -> (Value, ValueSlice);

View File

@@ -90,6 +90,15 @@ macro_rules! isle_prelude_methods {
ty.bits()
}
#[inline]
fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
    if ty.bits() <= 32 {
        Some(ty)
    } else {
        None
    }
}
#[inline]
fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
    if ty.bits() <= 64 {

View File

@@ -105,6 +105,8 @@ pub trait LowerCtx {
fn num_outputs(&self, ir_inst: Inst) -> usize;
/// Get the type for an instruction's input.
fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type;
/// Get the type for a value.
fn value_ty(&self, val: Value) -> Type;
/// Get the type for an instruction's output.
fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type;
/// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
@@ -129,6 +131,9 @@ pub trait LowerCtx {
fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput;
/// Like `get_input_as_source_or_const` but with a `Value`.
fn get_value_as_source_or_const(&self, value: Value) -> NonRegInput;
/// Resolves a particular input of an instruction to the `Value` that it is
/// represented with.
fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value;
/// Put the `idx`th input into register(s) and return the assigned register.
fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg>;
/// Put the given value into register(s) and return the assigned register.
@@ -1109,8 +1114,10 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
}
fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
    self.value_ty(self.input_as_value(ir_inst, idx))
}
fn value_ty(&self, val: Value) -> Type {
    self.f.dfg.value_type(val)
}
@@ -1122,9 +1129,13 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
self.inst_constants.get(&ir_inst).cloned()
}
fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
    let val = self.f.dfg.inst_args(ir_inst)[idx];
    self.f.dfg.resolve_aliases(val)
}
fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
    let val = self.input_as_value(ir_inst, idx);
    self.get_value_as_source_or_const(val)
}

View File

@@ -144,6 +144,10 @@
;;;; Helper Clif Extractors ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; An extractor that only matches types that can fit in 32 bits.
(decl fits_in_32 (Type) Type)
(extern extractor fits_in_32 fits_in_32)
;; An extractor that only matches types that can fit in 64 bits.
(decl fits_in_64 (Type) Type)
(extern extractor fits_in_64 fits_in_64)

View File

@@ -369,3 +369,23 @@ block0(v0: i128, v1: i128):
; nextln: madd x0, x0, x2, xzr
; nextln: ret
function %add_mul_1(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = imul v1, v2
v4 = iadd v0, v3
return v4
}
; check: madd w0, w1, w2, w0
; nextln: ret
function %add_mul_2(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = imul v1, v2
v4 = iadd v3, v0
return v4
}
; check: madd w0, w1, w2, w0
; nextln: ret