[AArch64] Merge 32- and 64-bit ALUOps (#3802)

Combine the two opcodes into one and add an OperandSize
field to these instructions, as well as an ISLE helper to perform
the conversion from Type.

This saves us from having to write ISLE helpers to select the
correct opcode based on type, and reduces the amount of code needed
for emission.

Copyright (c) 2022, Arm Limited.
This commit is contained in:
Sam Parker
2022-02-17 18:03:54 +00:00
committed by GitHub
parent b62fe21914
commit e572198f85
10 changed files with 1786 additions and 1987 deletions

View File

@@ -514,7 +514,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
let mut insts = SmallVec::new(); let mut insts = SmallVec::new();
if let Some(imm12) = Imm12::maybe_from_u64(imm) { if let Some(imm12) = Imm12::maybe_from_u64(imm) {
insts.push(Inst::AluRRImm12 { insts.push(Inst::AluRRImm12 {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: into_reg, rd: into_reg,
rn: from_reg, rn: from_reg,
imm12, imm12,
@@ -524,7 +525,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
assert_ne!(scratch2.to_reg(), from_reg); assert_ne!(scratch2.to_reg(), from_reg);
insts.extend(Inst::load_constant(scratch2, imm.into())); insts.extend(Inst::load_constant(scratch2, imm.into()));
insts.push(Inst::AluRRRExtend { insts.push(Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: into_reg, rd: into_reg,
rn: from_reg, rn: from_reg,
rm: scratch2.to_reg(), rm: scratch2.to_reg(),
@@ -537,7 +539,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> { fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
let mut insts = SmallVec::new(); let mut insts = SmallVec::new();
insts.push(Inst::AluRRRExtend { insts.push(Inst::AluRRRExtend {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: stack_reg(), rn: stack_reg(),
rm: limit_reg, rm: limit_reg,
@@ -586,12 +589,13 @@ impl ABIMachineSpec for AArch64MachineDeps {
(-amount as u64, true) (-amount as u64, true)
}; };
let alu_op = if is_sub { ALUOp::Sub64 } else { ALUOp::Add64 }; let alu_op = if is_sub { ALUOp::Sub } else { ALUOp::Add };
let mut ret = SmallVec::new(); let mut ret = SmallVec::new();
if let Some(imm12) = Imm12::maybe_from_u64(amount) { if let Some(imm12) = Imm12::maybe_from_u64(amount) {
let adj_inst = Inst::AluRRImm12 { let adj_inst = Inst::AluRRImm12 {
alu_op, alu_op,
size: OperandSize::Size64,
rd: writable_stack_reg(), rd: writable_stack_reg(),
rn: stack_reg(), rn: stack_reg(),
imm12, imm12,
@@ -602,6 +606,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
let const_inst = Inst::load_constant(tmp, amount); let const_inst = Inst::load_constant(tmp, amount);
let adj_inst = Inst::AluRRRExtend { let adj_inst = Inst::AluRRRExtend {
alu_op, alu_op,
size: OperandSize::Size64,
rd: writable_stack_reg(), rd: writable_stack_reg(),
rn: stack_reg(), rn: stack_reg(),
rm: tmp.to_reg(), rm: tmp.to_reg(),
@@ -659,7 +664,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
// mov fp (x29), sp. This uses the ADDI rd, rs, 0 form of `MOV` because // mov fp (x29), sp. This uses the ADDI rd, rs, 0 form of `MOV` because
// the usual encoding (`ORR`) does not work with SP. // the usual encoding (`ORR`) does not work with SP.
insts.push(Inst::AluRRImm12 { insts.push(Inst::AluRRImm12 {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: writable_fp_reg(), rd: writable_fp_reg(),
rn: stack_reg(), rn: stack_reg(),
imm12: Imm12 { imm12: Imm12 {

View File

@@ -10,6 +10,7 @@
;; An ALU operation with two register sources and a register destination. ;; An ALU operation with two register sources and a register destination.
(AluRRR (AluRRR
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(rm Reg)) (rm Reg))
@@ -26,6 +27,7 @@
;; destination. ;; destination.
(AluRRImm12 (AluRRImm12
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(imm12 Imm12)) (imm12 Imm12))
@@ -33,6 +35,7 @@
;; An ALU operation with a register source and an immediate-logic source, and a register destination. ;; An ALU operation with a register source and an immediate-logic source, and a register destination.
(AluRRImmLogic (AluRRImmLogic
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(imml ImmLogic)) (imml ImmLogic))
@@ -40,6 +43,7 @@
;; An ALU operation with a register source and an immediate-shiftamt source, and a register destination. ;; An ALU operation with a register source and an immediate-shiftamt source, and a register destination.
(AluRRImmShift (AluRRImmShift
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(immshift ImmShift)) (immshift ImmShift))
@@ -48,6 +52,7 @@
;; destination. ;; destination.
(AluRRRShift (AluRRRShift
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(rm Reg) (rm Reg)
@@ -57,6 +62,7 @@
;; shifted, and a register destination. ;; shifted, and a register destination.
(AluRRRExtend (AluRRRExtend
(alu_op ALUOp) (alu_op ALUOp)
(size OperandSize)
(rd WritableReg) (rd WritableReg)
(rn Reg) (rn Reg)
(rm Reg) (rm Reg)
@@ -788,62 +794,39 @@
;; below (see `Inst`) in any combination. ;; below (see `Inst`) in any combination.
(type ALUOp (type ALUOp
(enum (enum
(Add32) (Add)
(Add64) (Sub)
(Sub32) (Orr)
(Sub64) (OrrNot)
(Orr32) (And)
(Orr64) (AndS)
(OrrNot32) (AndNot)
(OrrNot64)
(And32)
(And64)
(AndS32)
(AndS64)
(AndNot32)
(AndNot64)
;; XOR (AArch64 calls this "EOR") ;; XOR (AArch64 calls this "EOR")
(Eor32) (Eor)
;; XOR (AArch64 calls this "EOR")
(Eor64)
;; XNOR (AArch64 calls this "EOR-NOT") ;; XNOR (AArch64 calls this "EOR-NOT")
(EorNot32) (EorNot)
;; XNOR (AArch64 calls this "EOR-NOT")
(EorNot64)
;; Add, setting flags ;; Add, setting flags
(AddS32) (AddS)
;; Add, setting flags
(AddS64)
;; Sub, setting flags ;; Sub, setting flags
(SubS32) (SubS)
;; Sub, setting flags
(SubS64)
;; Signed multiply, high-word result ;; Signed multiply, high-word result
(SMulH) (SMulH)
;; Unsigned multiply, high-word result ;; Unsigned multiply, high-word result
(UMulH) (UMulH)
(SDiv64) (SDiv)
(UDiv64) (UDiv)
(RotR32) (RotR)
(RotR64) (Lsr)
(Lsr32) (Asr)
(Lsr64) (Lsl)
(Asr32)
(Asr64)
(Lsl32)
(Lsl64)
;; Add with carry ;; Add with carry
(Adc32) (Adc)
(Adc64)
;; Add with carry, settings flags ;; Add with carry, settings flags
(AdcS32) (AdcS)
(AdcS64)
;; Subtract with carry ;; Subtract with carry
(Sbc32) (Sbc)
(Sbc64)
;; Subtract with carry, settings flags ;; Subtract with carry, settings flags
(SbcS32) (SbcS)
(SbcS64)
)) ))
;; An ALU operation with three arguments. ;; An ALU operation with three arguments.
@@ -910,6 +893,11 @@
(enum Size32 (enum Size32
Size64)) Size64))
;; Helper for calculating the `OperandSize` corresponding to a type
(decl operand_size (Type) OperandSize)
(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32))
(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64))
(type ScalarSize extern (type ScalarSize extern
(enum Size8 (enum Size8
Size16 Size16
@@ -1388,24 +1376,24 @@
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRImmLogic` instructions. ;; Helper for emitting `MInst.AluRRImmLogic` instructions.
(decl alu_rr_imm_logic (ALUOp Reg ImmLogic) Reg) (decl alu_rr_imm_logic (ALUOp Type Reg ImmLogic) Reg)
(rule (alu_rr_imm_logic op src imm) (rule (alu_rr_imm_logic op ty src imm)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRImmLogic op dst src imm)))) (_ Unit (emit (MInst.AluRRImmLogic op (operand_size ty) dst src imm))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRImmShift` instructions. ;; Helper for emitting `MInst.AluRRImmShift` instructions.
(decl alu_rr_imm_shift (ALUOp Reg ImmShift) Reg) (decl alu_rr_imm_shift (ALUOp Type Reg ImmShift) Reg)
(rule (alu_rr_imm_shift op src imm) (rule (alu_rr_imm_shift op ty src imm)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRImmShift op dst src imm)))) (_ Unit (emit (MInst.AluRRImmShift op (operand_size ty) dst src imm))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRR` instructions. ;; Helper for emitting `MInst.AluRRR` instructions.
(decl alu_rrr (ALUOp Reg Reg) Reg) (decl alu_rrr (ALUOp Type Reg Reg) Reg)
(rule (alu_rrr op src1 src2) (rule (alu_rrr op ty src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRR op dst src1 src2)))) (_ Unit (emit (MInst.AluRRR op (operand_size ty) dst src1 src2))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.VecRRR` instructions. ;; Helper for emitting `MInst.VecRRR` instructions.
@@ -1430,33 +1418,33 @@
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRImm12` instructions. ;; Helper for emitting `MInst.AluRRImm12` instructions.
(decl alu_rr_imm12 (ALUOp Reg Imm12) Reg) (decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg)
(rule (alu_rr_imm12 op src imm) (rule (alu_rr_imm12 op ty src imm)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRImm12 op dst src imm)))) (_ Unit (emit (MInst.AluRRImm12 op (operand_size ty) dst src imm))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRRShift` instructions. ;; Helper for emitting `MInst.AluRRRShift` instructions.
(decl alu_rrr_shift (ALUOp Reg Reg ShiftOpAndAmt) Reg) (decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg)
(rule (alu_rrr_shift op src1 src2 shift) (rule (alu_rrr_shift op ty src1 src2 shift)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRRShift op dst src1 src2 shift)))) (_ Unit (emit (MInst.AluRRRShift op (operand_size ty) dst src1 src2 shift))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `MInst.AluRRRExtend` instructions. ;; Helper for emitting `MInst.AluRRRExtend` instructions.
(decl alu_rrr_extend (ALUOp Reg Reg ExtendOp) Reg) (decl alu_rrr_extend (ALUOp Type Reg Reg ExtendOp) Reg)
(rule (alu_rrr_extend op src1 src2 extend) (rule (alu_rrr_extend op ty src1 src2 extend)
(let ((dst WritableReg (temp_writable_reg $I64)) (let ((dst WritableReg (temp_writable_reg $I64))
(_ Unit (emit (MInst.AluRRRExtend op dst src1 src2 extend)))) (_ Unit (emit (MInst.AluRRRExtend op (operand_size ty) dst src1 src2 extend))))
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Same as `alu_rrr_extend`, but takes an `ExtendedValue` packed "pair" instead ;; Same as `alu_rrr_extend`, but takes an `ExtendedValue` packed "pair" instead
;; of a `Reg` and an `ExtendOp`. ;; of a `Reg` and an `ExtendOp`.
(decl alu_rr_extend_reg (ALUOp Reg ExtendedValue) Reg) (decl alu_rr_extend_reg (ALUOp Type Reg ExtendedValue) Reg)
(rule (alu_rr_extend_reg op src1 extended_reg) (rule (alu_rr_extend_reg op ty src1 extended_reg)
(let ((src2 Reg (put_extended_in_reg extended_reg)) (let ((src2 Reg (put_extended_in_reg extended_reg))
(extend ExtendOp (get_extended_op extended_reg))) (extend ExtendOp (get_extended_op extended_reg)))
(alu_rrr_extend op src1 src2 extend))) (alu_rrr_extend op ty src1 src2 extend)))
;; Helper for emitting `MInst.AluRRRR` instructions. ;; Helper for emitting `MInst.AluRRRR` instructions.
(decl alu_rrrr (ALUOp3 Reg Reg Reg) Reg) (decl alu_rrrr (ALUOp3 Reg Reg Reg) Reg)
@@ -1473,36 +1461,36 @@
(writable_reg_to_reg dst))) (writable_reg_to_reg dst)))
;; Helper for emitting `adds` instructions. ;; Helper for emitting `adds` instructions.
(decl add64_with_flags (Reg Reg) ProducesFlags) (decl add_with_flags (Type Reg Reg) ProducesFlags)
(rule (add64_with_flags src1 src2) (rule (add_with_flags ty src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64))) (let ((dst WritableReg (temp_writable_reg $I64)))
(ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.AddS64) dst src1 src2) (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.AddS) (operand_size ty) dst src1 src2)
(writable_reg_to_reg dst)))) (writable_reg_to_reg dst))))
;; Helper for emitting `adc` instructions. ;; Helper for emitting `adc` instructions.
(decl adc64 (Reg Reg) ConsumesFlags) (decl adc (Type Reg Reg) ConsumesFlags)
(rule (adc64 src1 src2) (rule (adc ty src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64))) (let ((dst WritableReg (temp_writable_reg $I64)))
(ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Adc64) dst src1 src2) (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Adc) (operand_size ty) dst src1 src2)
(writable_reg_to_reg dst)))) (writable_reg_to_reg dst))))
;; Helper for emitting `subs` instructions. ;; Helper for emitting `subs` instructions.
(decl sub64_with_flags (Reg Reg) ProducesFlags) (decl sub_with_flags (Type Reg Reg) ProducesFlags)
(rule (sub64_with_flags src1 src2) (rule (sub_with_flags ty src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64))) (let ((dst WritableReg (temp_writable_reg $I64)))
(ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.SubS64) dst src1 src2) (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.SubS) (operand_size ty) dst src1 src2)
(writable_reg_to_reg dst)))) (writable_reg_to_reg dst))))
(decl cmp64_imm (Reg Imm12) ProducesFlags) (decl cmp64_imm (Reg Imm12) ProducesFlags)
(rule (cmp64_imm src1 src2) (rule (cmp64_imm src1 src2)
(ProducesFlags.ProducesFlags (MInst.AluRRImm12 (ALUOp.SubS64) (writable_zero_reg) src1 src2) (ProducesFlags.ProducesFlags (MInst.AluRRImm12 (ALUOp.SubS) (OperandSize.Size64) (writable_zero_reg) src1 src2)
(zero_reg))) (zero_reg)))
;; Helper for emitting `sbc` instructions. ;; Helper for emitting `sbc` instructions.
(decl sbc64 (Reg Reg) ConsumesFlags) (decl sbc (Type Reg Reg) ConsumesFlags)
(rule (sbc64 src1 src2) (rule (sbc ty src1 src2)
(let ((dst WritableReg (temp_writable_reg $I64))) (let ((dst WritableReg (temp_writable_reg $I64)))
(ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Sbc64) dst src1 src2) (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Sbc) (operand_size ty) dst src1 src2)
(writable_reg_to_reg dst)))) (writable_reg_to_reg dst))))
;; Helper for emitting `MInst.VecMisc` instructions. ;; Helper for emitting `MInst.VecMisc` instructions.
@@ -1591,9 +1579,10 @@
;; ;;
;; Produces a `ProducesFlags` rather than a register or emitted instruction ;; Produces a `ProducesFlags` rather than a register or emitted instruction
;; which must be paired with `with_flags*` helpers. ;; which must be paired with `with_flags*` helpers.
(decl tst64_imm (Reg ImmLogic) ProducesFlags) (decl tst_imm (Type Reg ImmLogic) ProducesFlags)
(rule (tst64_imm reg imm) (rule (tst_imm ty reg imm)
(ProducesFlags.ProducesFlags (MInst.AluRRImmLogic (ALUOp.AndS64) (ProducesFlags.ProducesFlags (MInst.AluRRImmLogic (ALUOp.AndS)
(operand_size ty)
(writable_zero_reg) (writable_zero_reg)
reg reg
imm) imm)
@@ -1613,44 +1602,16 @@
;; Helpers for generating `add` instructions. ;; Helpers for generating `add` instructions.
(decl add (Type Reg Reg) Reg) (decl add (Type Reg Reg) Reg)
(rule (add (fits_in_32 _ty) x y) (add32 x y)) (rule (add ty x y) (alu_rrr (ALUOp.Add) ty x y))
(rule (add $I64 x y) (add64 x y))
(decl add32 (Reg Reg) Reg)
(rule (add32 x y) (alu_rrr (ALUOp.Add32) x y))
(decl add64 (Reg Reg) Reg)
(rule (add64 x y) (alu_rrr (ALUOp.Add64) x y))
(decl add_imm (Type Reg Imm12) Reg) (decl add_imm (Type Reg Imm12) Reg)
(rule (add_imm (fits_in_32 _ty) x y) (add32_imm x y)) (rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y))
(rule (add_imm $I64 x y) (add64_imm x y))
(decl add32_imm (Reg Imm12) Reg)
(rule (add32_imm x y) (alu_rr_imm12 (ALUOp.Add32) x y))
(decl add64_imm (Reg Imm12) Reg)
(rule (add64_imm x y) (alu_rr_imm12 (ALUOp.Add64) x y))
(decl add_extend (Type Reg ExtendedValue) Reg) (decl add_extend (Type Reg ExtendedValue) Reg)
(rule (add_extend (fits_in_32 _ty) x y) (add32_extend x y)) (rule (add_extend ty x y) (alu_rr_extend_reg (ALUOp.Add) ty x y))
(rule (add_extend $I64 x y) (add64_extend x y))
(decl add32_extend (Reg ExtendedValue) Reg)
(rule (add32_extend x y) (alu_rr_extend_reg (ALUOp.Add32) x y))
(decl add64_extend (Reg ExtendedValue) Reg)
(rule (add64_extend x y) (alu_rr_extend_reg (ALUOp.Add64) x y))
(decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg) (decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg)
(rule (add_shift (fits_in_32 _ty) x y z) (add32_shift x y z)) (rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z))
(rule (add_shift $I64 x y z) (add64_shift x y z))
(decl add32_shift (Reg Reg ShiftOpAndAmt) Reg)
(rule (add32_shift x y z) (alu_rrr_shift (ALUOp.Add32) x y z))
(decl add64_shift (Reg Reg ShiftOpAndAmt) Reg)
(rule (add64_shift x y z) (alu_rrr_shift (ALUOp.Add64) x y z))
(decl add_vec (Reg Reg VectorSize) Reg) (decl add_vec (Reg Reg VectorSize) Reg)
(rule (add_vec x y size) (vec_rrr (VecALUOp.Add) x y size)) (rule (add_vec x y size) (vec_rrr (VecALUOp.Add) x y size))
@@ -1658,44 +1619,16 @@
;; Helpers for generating `sub` instructions. ;; Helpers for generating `sub` instructions.
(decl sub (Type Reg Reg) Reg) (decl sub (Type Reg Reg) Reg)
(rule (sub (fits_in_32 _ty) x y) (sub32 x y)) (rule (sub ty x y) (alu_rrr (ALUOp.Sub) ty x y))
(rule (sub $I64 x y) (sub64 x y))
(decl sub32 (Reg Reg) Reg)
(rule (sub32 x y) (alu_rrr (ALUOp.Sub32) x y))
(decl sub64 (Reg Reg) Reg)
(rule (sub64 x y) (alu_rrr (ALUOp.Sub64) x y))
(decl sub_imm (Type Reg Imm12) Reg) (decl sub_imm (Type Reg Imm12) Reg)
(rule (sub_imm (fits_in_32 _ty) x y) (sub32_imm x y)) (rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y))
(rule (sub_imm $I64 x y) (sub64_imm x y))
(decl sub32_imm (Reg Imm12) Reg)
(rule (sub32_imm x y) (alu_rr_imm12 (ALUOp.Sub32) x y))
(decl sub64_imm (Reg Imm12) Reg)
(rule (sub64_imm x y) (alu_rr_imm12 (ALUOp.Sub64) x y))
(decl sub_extend (Type Reg ExtendedValue) Reg) (decl sub_extend (Type Reg ExtendedValue) Reg)
(rule (sub_extend (fits_in_32 _ty) x y) (sub32_extend x y)) (rule (sub_extend ty x y) (alu_rr_extend_reg (ALUOp.Sub) ty x y))
(rule (sub_extend $I64 x y) (sub64_extend x y))
(decl sub32_extend (Reg ExtendedValue) Reg)
(rule (sub32_extend x y) (alu_rr_extend_reg (ALUOp.Sub32) x y))
(decl sub64_extend (Reg ExtendedValue) Reg)
(rule (sub64_extend x y) (alu_rr_extend_reg (ALUOp.Sub64) x y))
(decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg) (decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg)
(rule (sub_shift (fits_in_32 _ty) x y z) (sub32_shift x y z)) (rule (sub_shift ty x y z) (alu_rrr_shift (ALUOp.Sub) ty x y z))
(rule (sub_shift $I64 x y z) (sub64_shift x y z))
(decl sub32_shift (Reg Reg ShiftOpAndAmt) Reg)
(rule (sub32_shift x y z) (alu_rrr_shift (ALUOp.Sub32) x y z))
(decl sub64_shift (Reg Reg ShiftOpAndAmt) Reg)
(rule (sub64_shift x y z) (alu_rrr_shift (ALUOp.Sub64) x y z))
(decl sub_vec (Reg Reg VectorSize) Reg) (decl sub_vec (Reg Reg VectorSize) Reg)
(rule (sub_vec x y size) (vec_rrr (VecALUOp.Sub) x y size)) (rule (sub_vec x y size) (vec_rrr (VecALUOp.Sub) x y size))
@@ -1734,12 +1667,12 @@
(rule (sqsub x y size) (vec_rrr (VecALUOp.Sqsub) x y size)) (rule (sqsub x y size) (vec_rrr (VecALUOp.Sqsub) x y size))
;; Helper for generating `umulh` instructions. ;; Helper for generating `umulh` instructions.
(decl umulh (Reg Reg) Reg) (decl umulh (Type Reg Reg) Reg)
(rule (umulh x y) (alu_rrr (ALUOp.UMulH) x y)) (rule (umulh ty x y) (alu_rrr (ALUOp.UMulH) ty x y))
;; Helper for generating `smulh` instructions. ;; Helper for generating `smulh` instructions.
(decl smulh (Reg Reg) Reg) (decl smulh (Type Reg Reg) Reg)
(rule (smulh x y) (alu_rrr (ALUOp.SMulH) x y)) (rule (smulh ty x y) (alu_rrr (ALUOp.SMulH) ty x y))
;; Helper for generating `mul` instructions. ;; Helper for generating `mul` instructions.
(decl mul (Reg Reg VectorSize) Reg) (decl mul (Reg Reg VectorSize) Reg)
@@ -1798,45 +1731,33 @@
(rule (umull32 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull32) x y high_half)) (rule (umull32 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull32) x y high_half))
;; Helper for generating `asr` instructions. ;; Helper for generating `asr` instructions.
(decl asr64 (Reg Reg) Reg) (decl asr (Type Reg Reg) Reg)
(rule (asr64 x y) (alu_rrr (ALUOp.Asr64) x y)) (rule (asr ty x y) (alu_rrr (ALUOp.Asr) ty x y))
(decl asr64_imm (Reg ImmShift) Reg) (decl asr_imm (Type Reg ImmShift) Reg)
(rule (asr64_imm x imm) (alu_rr_imm_shift (ALUOp.Asr64) x imm)) (rule (asr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Asr) ty x imm))
;; Helper for generating `lsr` instructions. ;; Helper for generating `lsr` instructions.
(decl lsr32 (Reg Reg) Reg) (decl lsr (Type Reg Reg) Reg)
(rule (lsr32 x y) (alu_rrr (ALUOp.Lsr32) x y)) (rule (lsr ty x y) (alu_rrr (ALUOp.Lsr) ty x y))
(decl lsr32_imm (Reg ImmShift) Reg) (decl lsr_imm (Type Reg ImmShift) Reg)
(rule (lsr32_imm x imm) (alu_rr_imm_shift (ALUOp.Lsr32) x imm)) (rule (lsr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsr) ty x imm))
(decl lsr64 (Reg Reg) Reg)
(rule (lsr64 x y) (alu_rrr (ALUOp.Lsr64) x y))
(decl lsr64_imm (Reg ImmShift) Reg)
(rule (lsr64_imm x imm) (alu_rr_imm_shift (ALUOp.Lsr64) x imm))
;; Helper for generating `lsl` instructions. ;; Helper for generating `lsl` instructions.
(decl lsl32 (Reg Reg) Reg) (decl lsl (Type Reg Reg) Reg)
(rule (lsl32 x y) (alu_rrr (ALUOp.Lsl32) x y)) (rule (lsl ty x y) (alu_rrr (ALUOp.Lsl) ty x y))
(decl lsl32_imm (Reg ImmShift) Reg) (decl lsl_imm (Type Reg ImmShift) Reg)
(rule (lsl32_imm x imm) (alu_rr_imm_shift (ALUOp.Lsl32) x imm)) (rule (lsl_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsl) ty x imm))
(decl lsl64 (Reg Reg) Reg)
(rule (lsl64 x y) (alu_rrr (ALUOp.Lsl64) x y))
(decl lsl64_imm (Reg ImmShift) Reg)
(rule (lsl64_imm x imm) (alu_rr_imm_shift (ALUOp.Lsl64) x imm))
;; Helper for generating `udiv` instructions. ;; Helper for generating `udiv` instructions.
(decl udiv64 (Reg Reg) Reg) (decl a64_udiv (Type Reg Reg) Reg)
(rule (udiv64 x y) (alu_rrr (ALUOp.UDiv64) x y)) (rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y))
;; Helper for generating `sdiv` instructions. ;; Helper for generating `sdiv` instructions.
(decl sdiv64 (Reg Reg) Reg) (decl a64_sdiv (Type Reg Reg) Reg)
(rule (sdiv64 x y) (alu_rrr (ALUOp.SDiv64) x y)) (rule (a64_sdiv ty x y) (alu_rrr (ALUOp.SDiv) ty x y))
;; Helper for generating `not` instructions. ;; Helper for generating `not` instructions.
(decl not (Reg VectorSize) Reg) (decl not (Reg VectorSize) Reg)
@@ -1845,46 +1766,26 @@
;; Helpers for generating `orr_not` instructions. ;; Helpers for generating `orr_not` instructions.
(decl orr_not (Type Reg Reg) Reg) (decl orr_not (Type Reg Reg) Reg)
(rule (orr_not (fits_in_32 _ty) x y) (orr_not32 x y)) (rule (orr_not ty x y) (alu_rrr (ALUOp.OrrNot) ty x y))
(rule (orr_not $I64 x y) (orr_not64 x y))
(decl orr_not32 (Reg Reg) Reg)
(rule (orr_not32 x y) (alu_rrr (ALUOp.OrrNot32) x y))
(decl orr_not64 (Reg Reg) Reg)
(rule (orr_not64 x y) (alu_rrr (ALUOp.OrrNot64) x y))
(decl orr_not_shift (Type Reg Reg ShiftOpAndAmt) Reg) (decl orr_not_shift (Type Reg Reg ShiftOpAndAmt) Reg)
(rule (orr_not_shift (fits_in_32 _ty) x y shift) (orr_not_shift32 x y shift)) (rule (orr_not_shift ty x y shift) (alu_rrr_shift (ALUOp.OrrNot) ty x y shift))
(rule (orr_not_shift $I64 x y shift) (orr_not_shift64 x y shift))
(decl orr_not_shift32 (Reg Reg ShiftOpAndAmt) Reg)
(rule (orr_not_shift32 x y shift) (alu_rrr_shift (ALUOp.OrrNot32) x y shift))
(decl orr_not_shift64 (Reg Reg ShiftOpAndAmt) Reg)
(rule (orr_not_shift64 x y shift) (alu_rrr_shift (ALUOp.OrrNot64) x y shift))
;; Helpers for generating `orr` instructions. ;; Helpers for generating `orr` instructions.
(decl orr32 (Reg Reg) Reg) (decl orr (Type Reg Reg) Reg)
(rule (orr32 x y) (alu_rrr (ALUOp.Orr32) x y)) (rule (orr ty x y) (alu_rrr (ALUOp.Orr) ty x y))
(decl orr32_imm (Reg ImmLogic) Reg) (decl orr_imm (Type Reg ImmLogic) Reg)
(rule (orr32_imm x y) (alu_rr_imm_logic (ALUOp.Orr32) x y)) (rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y))
(decl orr64 (Reg Reg) Reg)
(rule (orr64 x y) (alu_rrr (ALUOp.Orr64) x y))
(decl orr64_imm (Reg ImmLogic) Reg)
(rule (orr64_imm x y) (alu_rr_imm_logic (ALUOp.Orr64) x y))
(decl orr_vec (Reg Reg VectorSize) Reg) (decl orr_vec (Reg Reg VectorSize) Reg)
(rule (orr_vec x y size) (vec_rrr (VecALUOp.Orr) x y size)) (rule (orr_vec x y size) (vec_rrr (VecALUOp.Orr) x y size))
;; Helpers for generating `and` instructions. ;; Helpers for generating `and` instructions.
(decl and32_imm (Reg ImmLogic) Reg) (decl and_imm (Type Reg ImmLogic) Reg)
(rule (and32_imm x y) (alu_rr_imm_logic (ALUOp.And32) x y)) (rule (and_imm ty x y) (alu_rr_imm_logic (ALUOp.And) ty x y))
(decl and_vec (Reg Reg VectorSize) Reg) (decl and_vec (Reg Reg VectorSize) Reg)
(rule (and_vec x y size) (vec_rrr (VecALUOp.And) x y size)) (rule (and_vec x y size) (vec_rrr (VecALUOp.And) x y size))
@@ -1907,17 +1808,11 @@
;; Helpers for generating `rotr` instructions. ;; Helpers for generating `rotr` instructions.
(decl rotr32 (Reg Reg) Reg) (decl a64_rotr (Type Reg Reg) Reg)
(rule (rotr32 x y) (alu_rrr (ALUOp.RotR32) x y)) (rule (a64_rotr ty x y) (alu_rrr (ALUOp.RotR) ty x y))
(decl rotr32_imm (Reg ImmShift) Reg) (decl a64_rotr_imm (Type Reg ImmShift) Reg)
(rule (rotr32_imm x y) (alu_rr_imm_shift (ALUOp.RotR32) x y)) (rule (a64_rotr_imm ty x y) (alu_rr_imm_shift (ALUOp.RotR) ty x y))
(decl rotr64 (Reg Reg) Reg)
(rule (rotr64 x y) (alu_rrr (ALUOp.RotR64) x y))
(decl rotr64_imm (Reg ImmShift) Reg)
(rule (rotr64_imm x y) (alu_rr_imm_shift (ALUOp.RotR64) x y))
;; Helpers for generating `rbit` instructions. ;; Helpers for generating `rbit` instructions.
@@ -1945,11 +1840,8 @@
;; Helpers for generating `eon` instructions. ;; Helpers for generating `eon` instructions.
(decl eon32 (Reg Reg) Reg) (decl eon (Type Reg Reg) Reg)
(rule (eon32 x y) (alu_rrr (ALUOp.EorNot32) x y)) (rule (eon ty x y) (alu_rrr (ALUOp.EorNot) ty x y))
(decl eon64 (Reg Reg) Reg)
(rule (eon64 x y) (alu_rrr (ALUOp.EorNot64) x y))
;; Helpers for generating `cnt` instructions. ;; Helpers for generating `cnt` instructions.
@@ -1970,7 +1862,7 @@
;; Weird logical-instruction immediate in ORI using zero register ;; Weird logical-instruction immediate in ORI using zero register
(rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n)) (rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n))
(orr64_imm (zero_reg) n)) (orr_imm $I64 (zero_reg) n))
(decl load_constant64_full (u64) Reg) (decl load_constant64_full (u64) Reg)
(extern constructor load_constant64_full load_constant64_full) (extern constructor load_constant64_full load_constant64_full)
@@ -2033,7 +1925,7 @@
(rule (trap_if_div_overflow ty x y) (rule (trap_if_div_overflow ty x y)
(let ( (let (
;; Check RHS is -1. ;; Check RHS is -1.
(_1 Unit (emit (MInst.AluRRImm12 (adds_op ty) (writable_zero_reg) y (u8_into_imm12 1)))) (_1 Unit (emit (MInst.AluRRImm12 (ALUOp.AddS) (operand_size ty) (writable_zero_reg) y (u8_into_imm12 1))))
;; Check LHS is min_value, by subtracting 1 and branching if ;; Check LHS is min_value, by subtracting 1 and branching if
;; there is overflow. ;; there is overflow.
@@ -2047,11 +1939,6 @@
) )
x)) x))
;; Helper to use either a 32 or 64-bit adds depending on the input type.
(decl adds_op (Type) ALUOp)
(rule (adds_op (fits_in_32 _ty)) (ALUOp.AddS32))
(rule (adds_op $I64) (ALUOp.AddS64))
;; An atomic load that can be sunk into another operation. ;; An atomic load that can be sunk into another operation.
(type SinkableAtomicLoad extern (enum)) (type SinkableAtomicLoad extern (enum))
@@ -2075,36 +1962,36 @@
;; Base case of operating on registers. ;; Base case of operating on registers.
(rule (alu_rs_imm_logic_commutative op ty x y) (rule (alu_rs_imm_logic_commutative op ty x y)
(alu_rrr op (put_in_reg x) (put_in_reg y))) (alu_rrr op ty (put_in_reg x) (put_in_reg y)))
;; Special cases for when one operand is a constant. ;; Special cases for when one operand is a constant.
(rule (alu_rs_imm_logic_commutative op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm)))) (rule (alu_rs_imm_logic_commutative op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm))))
(alu_rr_imm_logic op (put_in_reg x) imm)) (alu_rr_imm_logic op ty (put_in_reg x) imm))
(rule (alu_rs_imm_logic_commutative op ty (def_inst (iconst (imm_logic_from_imm64 <ty imm))) x) (rule (alu_rs_imm_logic_commutative op ty (def_inst (iconst (imm_logic_from_imm64 <ty imm))) x)
(alu_rr_imm_logic op (put_in_reg x) imm)) (alu_rr_imm_logic op ty (put_in_reg x) imm))
;; Special cases for when one operand is shifted left by a constant. ;; Special cases for when one operand is shifted left by a constant.
(rule (alu_rs_imm_logic_commutative op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt)))))) (rule (alu_rs_imm_logic_commutative op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))
(alu_rrr_shift op (put_in_reg x) (put_in_reg y) amt)) (alu_rrr_shift op ty (put_in_reg x) (put_in_reg y) amt))
(rule (alu_rs_imm_logic_commutative op ty (def_inst (ishl x (def_inst (iconst (lshl_from_imm64 <ty amt))))) y) (rule (alu_rs_imm_logic_commutative op ty (def_inst (ishl x (def_inst (iconst (lshl_from_imm64 <ty amt))))) y)
(alu_rrr_shift op (put_in_reg y) (put_in_reg x) amt)) (alu_rrr_shift op ty (put_in_reg y) (put_in_reg x) amt))
;; Same as `alu_rs_imm_logic_commutative` above, except that it doesn't require ;; Same as `alu_rs_imm_logic_commutative` above, except that it doesn't require
;; that the operation is commutative. ;; that the operation is commutative.
(decl alu_rs_imm_logic (ALUOp Type Value Value) Reg) (decl alu_rs_imm_logic (ALUOp Type Value Value) Reg)
(rule (alu_rs_imm_logic op ty x y) (rule (alu_rs_imm_logic op ty x y)
(alu_rrr op (put_in_reg x) (put_in_reg y))) (alu_rrr op ty (put_in_reg x) (put_in_reg y)))
(rule (alu_rs_imm_logic op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm)))) (rule (alu_rs_imm_logic op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm))))
(alu_rr_imm_logic op (put_in_reg x) imm)) (alu_rr_imm_logic op ty (put_in_reg x) imm))
(rule (alu_rs_imm_logic op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt)))))) (rule (alu_rs_imm_logic op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))
(alu_rrr_shift op (put_in_reg x) (put_in_reg y) amt)) (alu_rrr_shift op ty (put_in_reg x) (put_in_reg y) amt))
;; Helper for generating i128 bitops which simply do the same operation to the ;; Helper for generating i128 bitops which simply do the same operation to the
;; hi/lo registers. ;; hi/lo registers.
;; ;;
;; TODO: Support immlogic here ;; TODO: Support immlogic here
(decl i128_alu_bitop (ALUOp Value Value) ValueRegs) (decl i128_alu_bitop (ALUOp Type Value Value) ValueRegs)
(rule (i128_alu_bitop op x y) (rule (i128_alu_bitop op ty x y)
(let ( (let (
(x_regs ValueRegs (put_in_regs x)) (x_regs ValueRegs (put_in_regs x))
(x_lo Reg (value_regs_get x_regs 0)) (x_lo Reg (value_regs_get x_regs 0))
@@ -2114,5 +2001,5 @@
(y_hi Reg (value_regs_get y_regs 1)) (y_hi Reg (value_regs_get y_regs 1))
) )
(value_regs (value_regs
(alu_rrr op x_lo y_lo) (alu_rrr op ty x_lo y_lo)
(alu_rrr op x_hi y_hi)))) (alu_rrr op ty x_hi y_hi))))

View File

@@ -67,7 +67,8 @@ pub fn mem_finalize(
// is a valid base (for SPOffset) which we must handle here. // is a valid base (for SPOffset) which we must handle here.
// Also, SP needs to be the first arg, not second. // Also, SP needs to be the first arg, not second.
let add_inst = Inst::AluRRRExtend { let add_inst = Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: tmp, rd: tmp,
rn: basereg, rn: basereg,
rm: tmp.to_reg(), rm: tmp.to_reg(),
@@ -682,52 +683,48 @@ impl MachInstEmit for Inst {
let mut start_off = sink.cur_offset(); let mut start_off = sink.cur_offset();
match self { match self {
&Inst::AluRRR { alu_op, rd, rn, rm } => { &Inst::AluRRR {
alu_op,
size,
rd,
rn,
rm,
} => {
debug_assert!(match alu_op {
ALUOp::SDiv | ALUOp::UDiv | ALUOp::SMulH | ALUOp::UMulH =>
size == OperandSize::Size64,
_ => true,
});
let top11 = match alu_op { let top11 = match alu_op {
ALUOp::Add32 => 0b00001011_000, ALUOp::Add => 0b00001011_000,
ALUOp::Add64 => 0b10001011_000, ALUOp::Adc => 0b00011010_000,
ALUOp::Adc32 => 0b00011010_000, ALUOp::AdcS => 0b00111010_000,
ALUOp::Adc64 => 0b10011010_000, ALUOp::Sub => 0b01001011_000,
ALUOp::AdcS32 => 0b00111010_000, ALUOp::Sbc => 0b01011010_000,
ALUOp::AdcS64 => 0b10111010_000, ALUOp::SbcS => 0b01111010_000,
ALUOp::Sub32 => 0b01001011_000, ALUOp::Orr => 0b00101010_000,
ALUOp::Sub64 => 0b11001011_000, ALUOp::And => 0b00001010_000,
ALUOp::Sbc32 => 0b01011010_000, ALUOp::AndS => 0b01101010_000,
ALUOp::Sbc64 => 0b11011010_000, ALUOp::Eor => 0b01001010_000,
ALUOp::SbcS32 => 0b01111010_000, ALUOp::OrrNot => 0b00101010_001,
ALUOp::SbcS64 => 0b11111010_000, ALUOp::AndNot => 0b00001010_001,
ALUOp::Orr32 => 0b00101010_000, ALUOp::EorNot => 0b01001010_001,
ALUOp::Orr64 => 0b10101010_000, ALUOp::AddS => 0b00101011_000,
ALUOp::And32 => 0b00001010_000, ALUOp::SubS => 0b01101011_000,
ALUOp::And64 => 0b10001010_000, ALUOp::SDiv => 0b10011010_110,
ALUOp::AndS32 => 0b01101010_000, ALUOp::UDiv => 0b10011010_110,
ALUOp::AndS64 => 0b11101010_000, ALUOp::RotR | ALUOp::Lsr | ALUOp::Asr | ALUOp::Lsl => 0b00011010_110,
ALUOp::Eor32 => 0b01001010_000,
ALUOp::Eor64 => 0b11001010_000,
ALUOp::OrrNot32 => 0b00101010_001,
ALUOp::OrrNot64 => 0b10101010_001,
ALUOp::AndNot32 => 0b00001010_001,
ALUOp::AndNot64 => 0b10001010_001,
ALUOp::EorNot32 => 0b01001010_001,
ALUOp::EorNot64 => 0b11001010_001,
ALUOp::AddS32 => 0b00101011_000,
ALUOp::AddS64 => 0b10101011_000,
ALUOp::SubS32 => 0b01101011_000,
ALUOp::SubS64 => 0b11101011_000,
ALUOp::SDiv64 => 0b10011010_110,
ALUOp::UDiv64 => 0b10011010_110,
ALUOp::RotR32 | ALUOp::Lsr32 | ALUOp::Asr32 | ALUOp::Lsl32 => 0b00011010_110,
ALUOp::RotR64 | ALUOp::Lsr64 | ALUOp::Asr64 | ALUOp::Lsl64 => 0b10011010_110,
ALUOp::SMulH => 0b10011011_010, ALUOp::SMulH => 0b10011011_010,
ALUOp::UMulH => 0b10011011_110, ALUOp::UMulH => 0b10011011_110,
}; };
let top11 = top11 | size.sf_bit() << 10;
let bit15_10 = match alu_op { let bit15_10 = match alu_op {
ALUOp::SDiv64 => 0b000011, ALUOp::SDiv => 0b000011,
ALUOp::UDiv64 => 0b000010, ALUOp::UDiv => 0b000010,
ALUOp::RotR32 | ALUOp::RotR64 => 0b001011, ALUOp::RotR => 0b001011,
ALUOp::Lsr32 | ALUOp::Lsr64 => 0b001001, ALUOp::Lsr => 0b001001,
ALUOp::Asr32 | ALUOp::Asr64 => 0b001010, ALUOp::Asr => 0b001010,
ALUOp::Lsl32 | ALUOp::Lsl64 => 0b001000, ALUOp::Lsl => 0b001000,
ALUOp::SMulH | ALUOp::UMulH => 0b011111, ALUOp::SMulH | ALUOp::UMulH => 0b011111,
_ => 0b000000, _ => 0b000000,
}; };
@@ -755,21 +752,19 @@ impl MachInstEmit for Inst {
} }
&Inst::AluRRImm12 { &Inst::AluRRImm12 {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref imm12, ref imm12,
} => { } => {
let top8 = match alu_op { let top8 = match alu_op {
ALUOp::Add32 => 0b000_10001, ALUOp::Add => 0b000_10001,
ALUOp::Add64 => 0b100_10001, ALUOp::Sub => 0b010_10001,
ALUOp::Sub32 => 0b010_10001, ALUOp::AddS => 0b001_10001,
ALUOp::Sub64 => 0b110_10001, ALUOp::SubS => 0b011_10001,
ALUOp::AddS32 => 0b001_10001,
ALUOp::AddS64 => 0b101_10001,
ALUOp::SubS32 => 0b011_10001,
ALUOp::SubS64 => 0b111_10001,
_ => unimplemented!("{:?}", alu_op), _ => unimplemented!("{:?}", alu_op),
}; };
let top8 = top8 | size.sf_bit() << 7;
sink.put4(enc_arith_rr_imm12( sink.put4(enc_arith_rr_imm12(
top8, top8,
imm12.shift_bits(), imm12.shift_bits(),
@@ -780,57 +775,53 @@ impl MachInstEmit for Inst {
} }
&Inst::AluRRImmLogic { &Inst::AluRRImmLogic {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref imml, ref imml,
} => { } => {
let (top9, inv) = match alu_op { let (top9, inv) = match alu_op {
ALUOp::Orr32 => (0b001_100100, false), ALUOp::Orr => (0b001_100100, false),
ALUOp::Orr64 => (0b101_100100, false), ALUOp::And => (0b000_100100, false),
ALUOp::And32 => (0b000_100100, false), ALUOp::AndS => (0b011_100100, false),
ALUOp::And64 => (0b100_100100, false), ALUOp::Eor => (0b010_100100, false),
ALUOp::AndS32 => (0b011_100100, false), ALUOp::OrrNot => (0b001_100100, true),
ALUOp::AndS64 => (0b111_100100, false), ALUOp::AndNot => (0b000_100100, true),
ALUOp::Eor32 => (0b010_100100, false), ALUOp::EorNot => (0b010_100100, true),
ALUOp::Eor64 => (0b110_100100, false),
ALUOp::OrrNot32 => (0b001_100100, true),
ALUOp::OrrNot64 => (0b101_100100, true),
ALUOp::AndNot32 => (0b000_100100, true),
ALUOp::AndNot64 => (0b100_100100, true),
ALUOp::EorNot32 => (0b010_100100, true),
ALUOp::EorNot64 => (0b110_100100, true),
_ => unimplemented!("{:?}", alu_op), _ => unimplemented!("{:?}", alu_op),
}; };
let top9 = top9 | size.sf_bit() << 8;
let imml = if inv { imml.invert() } else { imml.clone() }; let imml = if inv { imml.invert() } else { imml.clone() };
sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd)); sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd));
} }
&Inst::AluRRImmShift { &Inst::AluRRImmShift {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref immshift, ref immshift,
} => { } => {
let amt = immshift.value(); let amt = immshift.value();
let (top10, immr, imms) = match alu_op { let (top10, immr, imms) = match alu_op {
ALUOp::RotR32 => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)), ALUOp::RotR => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)),
ALUOp::RotR64 => (0b1001001111, machreg_to_gpr(rn), u32::from(amt)), ALUOp::Lsr => (0b0101001100, u32::from(amt), 0b011111),
ALUOp::Lsr32 => (0b0101001100, u32::from(amt), 0b011111), ALUOp::Asr => (0b0001001100, u32::from(amt), 0b011111),
ALUOp::Lsr64 => (0b1101001101, u32::from(amt), 0b111111), ALUOp::Lsl => {
ALUOp::Asr32 => (0b0001001100, u32::from(amt), 0b011111), let bits = if size.is64() { 64 } else { 32 };
ALUOp::Asr64 => (0b1001001101, u32::from(amt), 0b111111), (
ALUOp::Lsl32 => (
0b0101001100, 0b0101001100,
u32::from((32 - amt) % 32), u32::from((bits - amt) % bits),
u32::from(31 - amt), u32::from(bits - 1 - amt),
), )
ALUOp::Lsl64 => ( }
0b1101001101,
u32::from((64 - amt) % 64),
u32::from(63 - amt),
),
_ => unimplemented!("{:?}", alu_op), _ => unimplemented!("{:?}", alu_op),
}; };
let top10 = top10 | size.sf_bit() << 9 | size.sf_bit();
let imms = match alu_op {
ALUOp::Lsr | ALUOp::Asr => imms | size.sf_bit() << 5,
_ => imms,
};
sink.put4( sink.put4(
(top10 << 22) (top10 << 22)
| (immr << 16) | (immr << 16)
@@ -842,36 +833,27 @@ impl MachInstEmit for Inst {
&Inst::AluRRRShift { &Inst::AluRRRShift {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
rm, rm,
ref shiftop, ref shiftop,
} => { } => {
let top11: u32 = match alu_op { let top11: u32 = match alu_op {
ALUOp::Add32 => 0b000_01011000, ALUOp::Add => 0b000_01011000,
ALUOp::Add64 => 0b100_01011000, ALUOp::AddS => 0b001_01011000,
ALUOp::AddS32 => 0b001_01011000, ALUOp::Sub => 0b010_01011000,
ALUOp::AddS64 => 0b101_01011000, ALUOp::SubS => 0b011_01011000,
ALUOp::Sub32 => 0b010_01011000, ALUOp::Orr => 0b001_01010000,
ALUOp::Sub64 => 0b110_01011000, ALUOp::And => 0b000_01010000,
ALUOp::SubS32 => 0b011_01011000, ALUOp::AndS => 0b011_01010000,
ALUOp::SubS64 => 0b111_01011000, ALUOp::Eor => 0b010_01010000,
ALUOp::Orr32 => 0b001_01010000, ALUOp::OrrNot => 0b001_01010001,
ALUOp::Orr64 => 0b101_01010000, ALUOp::EorNot => 0b010_01010001,
ALUOp::And32 => 0b000_01010000, ALUOp::AndNot => 0b000_01010001,
ALUOp::And64 => 0b100_01010000,
ALUOp::AndS32 => 0b011_01010000,
ALUOp::AndS64 => 0b111_01010000,
ALUOp::Eor32 => 0b010_01010000,
ALUOp::Eor64 => 0b110_01010000,
ALUOp::OrrNot32 => 0b001_01010001,
ALUOp::OrrNot64 => 0b101_01010001,
ALUOp::EorNot32 => 0b010_01010001,
ALUOp::EorNot64 => 0b110_01010001,
ALUOp::AndNot32 => 0b000_01010001,
ALUOp::AndNot64 => 0b100_01010001,
_ => unimplemented!("{:?}", alu_op), _ => unimplemented!("{:?}", alu_op),
}; };
let top11 = top11 | size.sf_bit() << 10;
let top11 = top11 | (u32::from(shiftop.op().bits()) << 1); let top11 = top11 | (u32::from(shiftop.op().bits()) << 1);
let bits_15_10 = u32::from(shiftop.amt().value()); let bits_15_10 = u32::from(shiftop.amt().value());
sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
@@ -879,22 +861,20 @@ impl MachInstEmit for Inst {
&Inst::AluRRRExtend { &Inst::AluRRRExtend {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
rm, rm,
extendop, extendop,
} => { } => {
let top11: u32 = match alu_op { let top11: u32 = match alu_op {
ALUOp::Add32 => 0b00001011001, ALUOp::Add => 0b00001011001,
ALUOp::Add64 => 0b10001011001, ALUOp::Sub => 0b01001011001,
ALUOp::Sub32 => 0b01001011001, ALUOp::AddS => 0b00101011001,
ALUOp::Sub64 => 0b11001011001, ALUOp::SubS => 0b01101011001,
ALUOp::AddS32 => 0b00101011001,
ALUOp::AddS64 => 0b10101011001,
ALUOp::SubS32 => 0b01101011001,
ALUOp::SubS64 => 0b11101011001,
_ => unimplemented!("{:?}", alu_op), _ => unimplemented!("{:?}", alu_op),
}; };
let top11 = top11 | size.sf_bit() << 10;
let bits_15_10 = u32::from(extendop.bits()) << 3; let bits_15_10 = u32::from(extendop.bits()) << 3;
sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
} }
@@ -1394,7 +1374,8 @@ impl MachInstEmit for Inst {
// mvn x28, x28 // mvn x28, x28
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::And64, alu_op: ALUOp::And,
size: OperandSize::Size64,
rd: x28wr, rd: x28wr,
rn: x27, rn: x27,
rm: x26, rm: x26,
@@ -1402,7 +1383,8 @@ impl MachInstEmit for Inst {
.emit(sink, emit_info, state); .emit(sink, emit_info, state);
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::OrrNot64, alu_op: ALUOp::OrrNot,
size: OperandSize::Size64,
rd: x28wr, rd: x28wr,
rn: xzr, rn: xzr,
rm: x28, rm: x28,
@@ -1425,11 +1407,8 @@ impl MachInstEmit for Inst {
}; };
Inst::AluRRR { Inst::AluRRR {
alu_op: if ty == I64 { alu_op: ALUOp::SubS,
ALUOp::SubS64 size: OperandSize::from_ty(ty),
} else {
ALUOp::SubS32
},
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: x27, rn: x27,
rm: x26, rm: x26,
@@ -1447,11 +1426,11 @@ impl MachInstEmit for Inst {
_ => { _ => {
// add/sub/and/orr/eor x28, x27, x26 // add/sub/and/orr/eor x28, x27, x26
let alu_op = match op { let alu_op = match op {
AtomicRmwOp::Add => ALUOp::Add64, AtomicRmwOp::Add => ALUOp::Add,
AtomicRmwOp::Sub => ALUOp::Sub64, AtomicRmwOp::Sub => ALUOp::Sub,
AtomicRmwOp::And => ALUOp::And64, AtomicRmwOp::And => ALUOp::And,
AtomicRmwOp::Or => ALUOp::Orr64, AtomicRmwOp::Or => ALUOp::Orr,
AtomicRmwOp::Xor => ALUOp::Eor64, AtomicRmwOp::Xor => ALUOp::Eor,
AtomicRmwOp::Nand AtomicRmwOp::Nand
| AtomicRmwOp::Umin | AtomicRmwOp::Umin
| AtomicRmwOp::Umax | AtomicRmwOp::Umax
@@ -1462,6 +1441,7 @@ impl MachInstEmit for Inst {
Inst::AluRRR { Inst::AluRRR {
alu_op, alu_op,
size: OperandSize::Size64,
rd: x28wr, rd: x28wr,
rn: x27, rn: x27,
rm: x26, rm: x26,
@@ -2478,7 +2458,8 @@ impl MachInstEmit for Inst {
// than AND on smaller cores. // than AND on smaller cores.
let imml = ImmLogic::maybe_from_u64(1, I32).unwrap(); let imml = ImmLogic::maybe_from_u64(1, I32).unwrap();
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::And32, alu_op: ALUOp::And,
size: OperandSize::Size32,
rd, rd,
rn, rn,
imml, imml,
@@ -2655,7 +2636,8 @@ impl MachInstEmit for Inst {
inst.emit(sink, emit_info, state); inst.emit(sink, emit_info, state);
// Add base of jump table to jump-table-sourced block offset // Add base of jump table to jump-table-sourced block offset
let inst = Inst::AluRRR { let inst = Inst::AluRRR {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: rtmp1, rd: rtmp1,
rn: rtmp1.to_reg(), rn: rtmp1.to_reg(),
rm: rtmp2.to_reg(), rm: rtmp2.to_reg(),
@@ -2731,15 +2713,12 @@ impl MachInstEmit for Inst {
} else { } else {
offset as u64 offset as u64
}; };
let alu_op = if offset < 0 { let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };
ALUOp::Sub64
} else {
ALUOp::Add64
};
if let Some((idx, extendop)) = index_reg { if let Some((idx, extendop)) = index_reg {
let add = Inst::AluRRRExtend { let add = Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
rm: idx, rm: idx,
@@ -2756,6 +2735,7 @@ impl MachInstEmit for Inst {
} else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
let add = Inst::AluRRImm12 { let add = Inst::AluRRImm12 {
alu_op, alu_op,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
imm12, imm12,
@@ -2775,6 +2755,7 @@ impl MachInstEmit for Inst {
} }
let add = Inst::AluRRR { let add = Inst::AluRRR {
alu_op, alu_op,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
rm: tmp.to_reg(), rm: tmp.to_reg(),

View File

@@ -32,7 +32,8 @@ fn test_aarch64_binemit() {
insns.push((Inst::Nop4, "1F2003D5", "nop")); insns.push((Inst::Nop4, "1F2003D5", "nop"));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -42,7 +43,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -52,7 +54,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Adc32, alu_op: ALUOp::Adc,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -62,7 +65,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Adc64, alu_op: ALUOp::Adc,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -72,7 +76,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AdcS32, alu_op: ALUOp::AdcS,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -82,7 +87,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AdcS64, alu_op: ALUOp::AdcS,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -92,7 +98,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Sub32, alu_op: ALUOp::Sub,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -102,7 +109,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Sub64, alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -112,7 +120,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Sbc32, alu_op: ALUOp::Sbc,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -122,7 +131,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Sbc64, alu_op: ALUOp::Sbc,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -132,7 +142,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SbcS32, alu_op: ALUOp::SbcS,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -142,7 +153,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SbcS64, alu_op: ALUOp::SbcS,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -153,7 +165,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Orr32, alu_op: ALUOp::Orr,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -163,7 +176,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -173,7 +187,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::And32, alu_op: ALUOp::And,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -183,7 +198,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::And64, alu_op: ALUOp::And,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -193,7 +209,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AndS32, alu_op: ALUOp::AndS,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -203,7 +220,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AndS64, alu_op: ALUOp::AndS,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -213,7 +231,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -224,7 +243,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -234,7 +254,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -244,7 +265,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AddS32, alu_op: ALUOp::AddS,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -254,7 +276,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AddS64, alu_op: ALUOp::AddS,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -264,7 +287,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::AddS64, alu_op: ALUOp::AddS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: xreg(5), rn: xreg(5),
imm12: Imm12::maybe_from_u64(1).unwrap(), imm12: Imm12::maybe_from_u64(1).unwrap(),
@@ -275,7 +299,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SDiv64, alu_op: ALUOp::SDiv,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -285,7 +310,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::UDiv64, alu_op: ALUOp::UDiv,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -296,7 +322,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Eor32, alu_op: ALUOp::Eor,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -306,7 +333,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Eor64, alu_op: ALUOp::Eor,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -316,7 +344,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AndNot32, alu_op: ALUOp::AndNot,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -326,7 +355,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::AndNot64, alu_op: ALUOp::AndNot,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -336,7 +366,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::OrrNot32, alu_op: ALUOp::OrrNot,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -346,7 +377,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::OrrNot64, alu_op: ALUOp::OrrNot,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -356,7 +388,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::EorNot32, alu_op: ALUOp::EorNot,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -366,7 +399,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::EorNot64, alu_op: ALUOp::EorNot,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -377,7 +411,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::RotR32, alu_op: ALUOp::RotR,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -387,7 +422,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::RotR64, alu_op: ALUOp::RotR,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -397,7 +433,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Lsr32, alu_op: ALUOp::Lsr,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -407,7 +444,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Lsr64, alu_op: ALUOp::Lsr,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -417,7 +455,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Asr32, alu_op: ALUOp::Asr,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -427,7 +466,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Asr64, alu_op: ALUOp::Asr,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -437,7 +477,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Lsl32, alu_op: ALUOp::Lsl,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -447,7 +488,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::Lsl64, alu_op: ALUOp::Lsl,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
rm: xreg(6), rm: xreg(6),
@@ -458,7 +500,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -471,7 +514,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -484,7 +528,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -497,7 +542,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::Sub32, alu_op: ALUOp::Sub,
size: OperandSize::Size32,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -510,7 +556,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::Sub64, alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -523,7 +570,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -536,7 +584,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImm12 { Inst::AluRRImm12 {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
imm12: Imm12 { imm12: Imm12 {
@@ -550,7 +599,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRExtend { Inst::AluRRRExtend {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(8), rn: xreg(8),
rm: xreg(9), rm: xreg(9),
@@ -562,7 +612,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRExtend { Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: writable_xreg(15), rd: writable_xreg(15),
rn: xreg(16), rn: xreg(16),
rm: xreg(17), rm: xreg(17),
@@ -574,7 +625,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRExtend { Inst::AluRRRExtend {
alu_op: ALUOp::Sub32, alu_op: ALUOp::Sub,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -586,7 +638,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRExtend { Inst::AluRRRExtend {
alu_op: ALUOp::Sub64, alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: writable_xreg(20), rd: writable_xreg(20),
rn: xreg(21), rn: xreg(21),
rm: xreg(22), rm: xreg(22),
@@ -598,7 +651,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -612,7 +666,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -626,7 +681,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Sub32, alu_op: ALUOp::Sub,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -640,7 +696,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Sub64, alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -654,7 +711,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Orr32, alu_op: ALUOp::Orr,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -668,7 +726,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -682,7 +741,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::And32, alu_op: ALUOp::And,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -696,7 +756,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::And64, alu_op: ALUOp::And,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -710,7 +771,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AndS32, alu_op: ALUOp::AndS,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -724,7 +786,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AndS64, alu_op: ALUOp::AndS,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -738,7 +801,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Eor32, alu_op: ALUOp::Eor,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -752,7 +816,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::Eor64, alu_op: ALUOp::Eor,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -766,7 +831,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::OrrNot32, alu_op: ALUOp::OrrNot,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -780,7 +846,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::OrrNot64, alu_op: ALUOp::OrrNot,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -794,7 +861,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AndNot32, alu_op: ALUOp::AndNot,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -808,7 +876,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AndNot64, alu_op: ALUOp::AndNot,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -822,7 +891,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::EorNot32, alu_op: ALUOp::EorNot,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -836,7 +906,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::EorNot64, alu_op: ALUOp::EorNot,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -850,7 +921,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AddS32, alu_op: ALUOp::AddS,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -864,7 +936,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::AddS64, alu_op: ALUOp::AddS,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -878,7 +951,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -892,7 +966,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRRShift { Inst::AluRRRShift {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
rm: xreg(12), rm: xreg(12),
@@ -907,7 +982,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRRExtend { Inst::AluRRRExtend {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: stack_reg(), rn: stack_reg(),
rm: xreg(12), rm: xreg(12),
@@ -964,6 +1040,7 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::SMulH, alu_op: ALUOp::SMulH,
size: OperandSize::Size64,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -974,6 +1051,7 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRR { Inst::AluRRR {
alu_op: ALUOp::UMulH, alu_op: ALUOp::UMulH,
size: OperandSize::Size64,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(2), rn: xreg(2),
rm: xreg(3), rm: xreg(3),
@@ -984,7 +1062,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::RotR32, alu_op: ALUOp::RotR,
size: OperandSize::Size32,
rd: writable_xreg(20), rd: writable_xreg(20),
rn: xreg(21), rn: xreg(21),
immshift: ImmShift::maybe_from_u64(19).unwrap(), immshift: ImmShift::maybe_from_u64(19).unwrap(),
@@ -994,7 +1073,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::RotR64, alu_op: ALUOp::RotR,
size: OperandSize::Size64,
rd: writable_xreg(20), rd: writable_xreg(20),
rn: xreg(21), rn: xreg(21),
immshift: ImmShift::maybe_from_u64(42).unwrap(), immshift: ImmShift::maybe_from_u64(42).unwrap(),
@@ -1004,7 +1084,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsr32, alu_op: ALUOp::Lsr,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
immshift: ImmShift::maybe_from_u64(13).unwrap(), immshift: ImmShift::maybe_from_u64(13).unwrap(),
@@ -1014,7 +1095,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsr64, alu_op: ALUOp::Lsr,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
immshift: ImmShift::maybe_from_u64(57).unwrap(), immshift: ImmShift::maybe_from_u64(57).unwrap(),
@@ -1024,7 +1106,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Asr32, alu_op: ALUOp::Asr,
size: OperandSize::Size32,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
immshift: ImmShift::maybe_from_u64(7).unwrap(), immshift: ImmShift::maybe_from_u64(7).unwrap(),
@@ -1034,7 +1117,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Asr64, alu_op: ALUOp::Asr,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
immshift: ImmShift::maybe_from_u64(35).unwrap(), immshift: ImmShift::maybe_from_u64(35).unwrap(),
@@ -1044,7 +1128,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsl32, alu_op: ALUOp::Lsl,
size: OperandSize::Size32,
rd: writable_xreg(8), rd: writable_xreg(8),
rn: xreg(9), rn: xreg(9),
immshift: ImmShift::maybe_from_u64(24).unwrap(), immshift: ImmShift::maybe_from_u64(24).unwrap(),
@@ -1054,7 +1139,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsl64, alu_op: ALUOp::Lsl,
size: OperandSize::Size64,
rd: writable_xreg(8), rd: writable_xreg(8),
rn: xreg(9), rn: xreg(9),
immshift: ImmShift::maybe_from_u64(63).unwrap(), immshift: ImmShift::maybe_from_u64(63).unwrap(),
@@ -1064,7 +1150,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsl32, alu_op: ALUOp::Lsl,
size: OperandSize::Size32,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
immshift: ImmShift::maybe_from_u64(0).unwrap(), immshift: ImmShift::maybe_from_u64(0).unwrap(),
@@ -1074,7 +1161,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmShift { Inst::AluRRImmShift {
alu_op: ALUOp::Lsl64, alu_op: ALUOp::Lsl,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(11), rn: xreg(11),
immshift: ImmShift::maybe_from_u64(0).unwrap(), immshift: ImmShift::maybe_from_u64(0).unwrap(),
@@ -1085,7 +1173,8 @@ fn test_aarch64_binemit() {
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::And32, alu_op: ALUOp::And,
size: OperandSize::Size32,
rd: writable_xreg(21), rd: writable_xreg(21),
rn: xreg(27), rn: xreg(27),
imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(), imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(),
@@ -1095,7 +1184,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::And64, alu_op: ALUOp::And,
size: OperandSize::Size64,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(6), rn: xreg(6),
imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(), imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(),
@@ -1105,7 +1195,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::AndS32, alu_op: ALUOp::AndS,
size: OperandSize::Size32,
rd: writable_xreg(21), rd: writable_xreg(21),
rn: xreg(27), rn: xreg(27),
imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(), imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(),
@@ -1115,7 +1206,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::AndS64, alu_op: ALUOp::AndS,
size: OperandSize::Size64,
rd: writable_xreg(7), rd: writable_xreg(7),
rn: xreg(6), rn: xreg(6),
imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(), imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(),
@@ -1125,7 +1217,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::Orr32, alu_op: ALUOp::Orr,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(5), rn: xreg(5),
imml: ImmLogic::maybe_from_u64(0x100000, I32).unwrap(), imml: ImmLogic::maybe_from_u64(0x100000, I32).unwrap(),
@@ -1135,7 +1228,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd: writable_xreg(4), rd: writable_xreg(4),
rn: xreg(5), rn: xreg(5),
imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(), imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(),
@@ -1145,7 +1239,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::Eor32, alu_op: ALUOp::Eor,
size: OperandSize::Size32,
rd: writable_xreg(1), rd: writable_xreg(1),
rn: xreg(5), rn: xreg(5),
imml: ImmLogic::maybe_from_u64(0x00007fff, I32).unwrap(), imml: ImmLogic::maybe_from_u64(0x00007fff, I32).unwrap(),
@@ -1155,7 +1250,8 @@ fn test_aarch64_binemit() {
)); ));
insns.push(( insns.push((
Inst::AluRRImmLogic { Inst::AluRRImmLogic {
alu_op: ALUOp::Eor64, alu_op: ALUOp::Eor,
size: OperandSize::Size64,
rd: writable_xreg(10), rd: writable_xreg(10),
rn: xreg(8), rn: xreg(8),
imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(), imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(),

View File

@@ -168,7 +168,8 @@ impl Inst {
} else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) { } else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) {
// Weird logical-instruction immediate in ORI using zero register // Weird logical-instruction immediate in ORI using zero register
smallvec![Inst::AluRRImmLogic { smallvec![Inst::AluRRImmLogic {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd, rd,
rn: zero_reg(), rn: zero_reg(),
imml, imml,
@@ -2097,58 +2098,45 @@ impl PrettyPrint for Inst {
impl Inst { impl Inst {
fn print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String { fn print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String {
fn op_name_size(alu_op: ALUOp) -> (&'static str, OperandSize) { fn op_name(alu_op: ALUOp) -> &'static str {
match alu_op { match alu_op {
ALUOp::Add32 => ("add", OperandSize::Size32), ALUOp::Add => "add",
ALUOp::Add64 => ("add", OperandSize::Size64), ALUOp::Sub => "sub",
ALUOp::Sub32 => ("sub", OperandSize::Size32), ALUOp::Orr => "orr",
ALUOp::Sub64 => ("sub", OperandSize::Size64), ALUOp::And => "and",
ALUOp::Orr32 => ("orr", OperandSize::Size32), ALUOp::AndS => "ands",
ALUOp::Orr64 => ("orr", OperandSize::Size64), ALUOp::Eor => "eor",
ALUOp::And32 => ("and", OperandSize::Size32), ALUOp::AddS => "adds",
ALUOp::And64 => ("and", OperandSize::Size64), ALUOp::SubS => "subs",
ALUOp::AndS32 => ("ands", OperandSize::Size32), ALUOp::SMulH => "smulh",
ALUOp::AndS64 => ("ands", OperandSize::Size64), ALUOp::UMulH => "umulh",
ALUOp::Eor32 => ("eor", OperandSize::Size32), ALUOp::SDiv => "sdiv",
ALUOp::Eor64 => ("eor", OperandSize::Size64), ALUOp::UDiv => "udiv",
ALUOp::AddS32 => ("adds", OperandSize::Size32), ALUOp::AndNot => "bic",
ALUOp::AddS64 => ("adds", OperandSize::Size64), ALUOp::OrrNot => "orn",
ALUOp::SubS32 => ("subs", OperandSize::Size32), ALUOp::EorNot => "eon",
ALUOp::SubS64 => ("subs", OperandSize::Size64), ALUOp::RotR => "ror",
ALUOp::SMulH => ("smulh", OperandSize::Size64), ALUOp::Lsr => "lsr",
ALUOp::UMulH => ("umulh", OperandSize::Size64), ALUOp::Asr => "asr",
ALUOp::SDiv64 => ("sdiv", OperandSize::Size64), ALUOp::Lsl => "lsl",
ALUOp::UDiv64 => ("udiv", OperandSize::Size64), ALUOp::Adc => "adc",
ALUOp::AndNot32 => ("bic", OperandSize::Size32), ALUOp::AdcS => "adcs",
ALUOp::AndNot64 => ("bic", OperandSize::Size64), ALUOp::Sbc => "sbc",
ALUOp::OrrNot32 => ("orn", OperandSize::Size32), ALUOp::SbcS => "sbcs",
ALUOp::OrrNot64 => ("orn", OperandSize::Size64),
ALUOp::EorNot32 => ("eon", OperandSize::Size32),
ALUOp::EorNot64 => ("eon", OperandSize::Size64),
ALUOp::RotR32 => ("ror", OperandSize::Size32),
ALUOp::RotR64 => ("ror", OperandSize::Size64),
ALUOp::Lsr32 => ("lsr", OperandSize::Size32),
ALUOp::Lsr64 => ("lsr", OperandSize::Size64),
ALUOp::Asr32 => ("asr", OperandSize::Size32),
ALUOp::Asr64 => ("asr", OperandSize::Size64),
ALUOp::Lsl32 => ("lsl", OperandSize::Size32),
ALUOp::Lsl64 => ("lsl", OperandSize::Size64),
ALUOp::Adc32 => ("adc", OperandSize::Size32),
ALUOp::Adc64 => ("adc", OperandSize::Size64),
ALUOp::AdcS32 => ("adcs", OperandSize::Size32),
ALUOp::AdcS64 => ("adcs", OperandSize::Size64),
ALUOp::Sbc32 => ("sbc", OperandSize::Size32),
ALUOp::Sbc64 => ("sbc", OperandSize::Size64),
ALUOp::SbcS32 => ("sbcs", OperandSize::Size32),
ALUOp::SbcS64 => ("sbcs", OperandSize::Size64),
} }
} }
match self { match self {
&Inst::Nop0 => "nop-zero-len".to_string(), &Inst::Nop0 => "nop-zero-len".to_string(),
&Inst::Nop4 => "nop".to_string(), &Inst::Nop4 => "nop".to_string(),
&Inst::AluRRR { alu_op, rd, rn, rm } => { &Inst::AluRRR {
let (op, size) = op_name_size(alu_op); alu_op,
size,
rd,
rn,
rm,
} => {
let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size);
@@ -2176,15 +2164,16 @@ impl Inst {
} }
&Inst::AluRRImm12 { &Inst::AluRRImm12 {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref imm12, ref imm12,
} => { } => {
let (op, size) = op_name_size(alu_op); let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
if imm12.bits == 0 && alu_op == ALUOp::Add64 { if imm12.bits == 0 && alu_op == ALUOp::Add && size.is64() {
// special-case MOV (used for moving into SP). // special-case MOV (used for moving into SP).
format!("mov {}, {}", rd, rn) format!("mov {}, {}", rd, rn)
} else { } else {
@@ -2194,11 +2183,12 @@ impl Inst {
} }
&Inst::AluRRImmLogic { &Inst::AluRRImmLogic {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref imml, ref imml,
} => { } => {
let (op, size) = op_name_size(alu_op); let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
let imml = imml.show_rru(mb_rru); let imml = imml.show_rru(mb_rru);
@@ -2206,11 +2196,12 @@ impl Inst {
} }
&Inst::AluRRImmShift { &Inst::AluRRImmShift {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
ref immshift, ref immshift,
} => { } => {
let (op, size) = op_name_size(alu_op); let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
let immshift = immshift.show_rru(mb_rru); let immshift = immshift.show_rru(mb_rru);
@@ -2218,12 +2209,13 @@ impl Inst {
} }
&Inst::AluRRRShift { &Inst::AluRRRShift {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
rm, rm,
ref shiftop, ref shiftop,
} => { } => {
let (op, size) = op_name_size(alu_op); let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size);
@@ -2232,12 +2224,13 @@ impl Inst {
} }
&Inst::AluRRRExtend { &Inst::AluRRRExtend {
alu_op, alu_op,
size,
rd, rd,
rn, rn,
rm, rm,
ref extendop, ref extendop,
} => { } => {
let (op, size) = op_name_size(alu_op); let op = op_name(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size);
@@ -3419,15 +3412,12 @@ impl Inst {
} else { } else {
offset as u64 offset as u64
}; };
let alu_op = if offset < 0 { let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };
ALUOp::Sub64
} else {
ALUOp::Add64
};
if let Some((idx, extendop)) = index_reg { if let Some((idx, extendop)) = index_reg {
let add = Inst::AluRRRExtend { let add = Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
rm: idx, rm: idx,
@@ -3441,6 +3431,7 @@ impl Inst {
} else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
let add = Inst::AluRRImm12 { let add = Inst::AluRRImm12 {
alu_op, alu_op,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
imm12, imm12,
@@ -3453,6 +3444,7 @@ impl Inst {
} }
let add = Inst::AluRRR { let add = Inst::AluRRR {
alu_op, alu_op,
size: OperandSize::Size64,
rd, rd,
rn: reg, rn: reg,
rm: tmp.to_reg(), rm: tmp.to_reg(),

View File

@@ -91,8 +91,8 @@
;; the actual addition is `adds` followed by `adc` which comprises the ;; the actual addition is `adds` followed by `adc` which comprises the
;; low/high bits of the result ;; low/high bits of the result
(with_flags (with_flags
(add64_with_flags x_lo y_lo) (add_with_flags $I64 x_lo y_lo)
(adc64 x_hi y_hi)))) (adc $I64 x_hi y_hi))))
;;;; Rules for `isub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `isub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -142,8 +142,8 @@
;; the actual subtraction is `subs` followed by `sbc` which comprises ;; the actual subtraction is `subs` followed by `sbc` which comprises
;; the low/high bits of the result ;; the low/high bits of the result
(with_flags (with_flags
(sub64_with_flags x_lo y_lo) (sub_with_flags $I64 x_lo y_lo)
(sbc64 x_hi y_hi)))) (sbc $I64 x_hi y_hi))))
;;;; Rules for `uadd_sat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `uadd_sat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -203,7 +203,7 @@
;; madd dst_hi, x_lo, y_hi, dst_hi ;; madd dst_hi, x_lo, y_hi, dst_hi
;; madd dst_hi, x_hi, y_lo, dst_hi ;; madd dst_hi, x_hi, y_lo, dst_hi
;; madd dst_lo, x_lo, y_lo, zero ;; madd dst_lo, x_lo, y_lo, zero
(dst_hi1 Reg (umulh x_lo y_lo)) (dst_hi1 Reg (umulh $I64 x_lo y_lo))
(dst_hi2 Reg (madd64 x_lo y_hi dst_hi1)) (dst_hi2 Reg (madd64 x_lo y_hi dst_hi1))
(dst_hi Reg (madd64 x_hi y_lo dst_hi2)) (dst_hi Reg (madd64 x_hi y_lo dst_hi2))
(dst_lo Reg (madd64 x_lo y_lo (zero_reg))) (dst_lo Reg (madd64 x_lo y_lo (zero_reg)))
@@ -358,28 +358,28 @@
;;;; Rules for `smulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `smulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type $I64 (smulhi x y))) (rule (lower (has_type $I64 (smulhi x y)))
(value_reg (smulh (put_in_reg x) (put_in_reg y)))) (value_reg (smulh $I64 (put_in_reg x) (put_in_reg y))))
(rule (lower (has_type (fits_in_32 ty) (smulhi x y))) (rule (lower (has_type (fits_in_32 ty) (smulhi x y)))
(let ( (let (
(x64 Reg (put_in_reg_sext64 x)) (x64 Reg (put_in_reg_sext64 x))
(y64 Reg (put_in_reg_sext64 y)) (y64 Reg (put_in_reg_sext64 y))
(mul Reg (madd64 x64 y64 (zero_reg))) (mul Reg (madd64 x64 y64 (zero_reg)))
(result Reg (asr64_imm mul (imm_shift_from_u8 (ty_bits ty)))) (result Reg (asr_imm $I64 mul (imm_shift_from_u8 (ty_bits ty))))
) )
(value_reg result))) (value_reg result)))
;;;; Rules for `umulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `umulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type $I64 (umulhi x y))) (rule (lower (has_type $I64 (umulhi x y)))
(value_reg (umulh (put_in_reg x) (put_in_reg y)))) (value_reg (umulh $I64 (put_in_reg x) (put_in_reg y))))
(rule (lower (has_type (fits_in_32 ty) (umulhi x y))) (rule (lower (has_type (fits_in_32 ty) (umulhi x y)))
(let ( (let (
(x64 Reg (put_in_reg_zext64 x)) (x64 Reg (put_in_reg_zext64 x))
(y64 Reg (put_in_reg_zext64 y)) (y64 Reg (put_in_reg_zext64 y))
(mul Reg (madd64 x64 y64 (zero_reg))) (mul Reg (madd64 x64 y64 (zero_reg)))
(result Reg (lsr64_imm mul (imm_shift_from_u8 (ty_bits ty)))) (result Reg (lsr_imm $I64 mul (imm_shift_from_u8 (ty_bits ty))))
) )
(value_reg result))) (value_reg result)))
@@ -391,7 +391,7 @@
;; Note that aarch64's `udiv` doesn't trap so to respect the semantics of ;; Note that aarch64's `udiv` doesn't trap so to respect the semantics of
;; CLIF's `udiv` the check for zero needs to be manually performed. ;; CLIF's `udiv` the check for zero needs to be manually performed.
(rule (lower (has_type (fits_in_64 ty) (udiv x y))) (rule (lower (has_type (fits_in_64 ty) (udiv x y)))
(value_reg (udiv64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y)))) (value_reg (a64_udiv $I64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y))))
;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero.
(decl put_nonzero_in_reg_zext64 (Value) Reg) (decl put_nonzero_in_reg_zext64 (Value) Reg)
@@ -428,7 +428,7 @@
(x64 Reg (put_in_reg_sext64 x)) (x64 Reg (put_in_reg_sext64 x))
(y64 Reg (put_nonzero_in_reg_sext64 y)) (y64 Reg (put_nonzero_in_reg_sext64 y))
(valid_x64 Reg (trap_if_div_overflow ty x64 y64)) (valid_x64 Reg (trap_if_div_overflow ty x64 y64))
(result Reg (sdiv64 valid_x64 y64)) (result Reg (a64_sdiv $I64 valid_x64 y64))
) )
(value_reg result))) (value_reg result)))
@@ -439,7 +439,7 @@
;; Special case for `sdiv` where no checks are needed due to division by a ;; Special case for `sdiv` where no checks are needed due to division by a
;; constant meaning the checks are always passed. ;; constant meaning the checks are always passed.
(rule (lower (has_type (fits_in_64 ty) (sdiv x (def_inst (iconst (safe_divisor_from_imm64 y)))))) (rule (lower (has_type (fits_in_64 ty) (sdiv x (def_inst (iconst (safe_divisor_from_imm64 y))))))
(value_reg (sdiv64 (put_in_reg_sext64 x) (imm ty y)))) (value_reg (a64_sdiv $I64 (put_in_reg_sext64 x) (imm ty y))))
;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero.
(decl put_nonzero_in_reg_sext64 (Value) Reg) (decl put_nonzero_in_reg_sext64 (Value) Reg)
@@ -470,7 +470,7 @@
(let ( (let (
(x64 Reg (put_in_reg_zext64 x)) (x64 Reg (put_in_reg_zext64 x))
(y64 Reg (put_nonzero_in_reg_zext64 y)) (y64 Reg (put_nonzero_in_reg_zext64 y))
(div Reg (udiv64 x64 y64)) (div Reg (a64_udiv $I64 x64 y64))
(result Reg (msub64 div y64 x64)) (result Reg (msub64 div y64 x64))
) )
(value_reg result))) (value_reg result)))
@@ -479,7 +479,7 @@
(let ( (let (
(x64 Reg (put_in_reg_sext64 x)) (x64 Reg (put_in_reg_sext64 x))
(y64 Reg (put_nonzero_in_reg_sext64 y)) (y64 Reg (put_nonzero_in_reg_sext64 y))
(div Reg (sdiv64 x64 y64)) (div Reg (a64_sdiv $I64 x64 y64))
(result Reg (msub64 div y64 x64)) (result Reg (msub64 div y64 x64))
) )
(value_reg result))) (value_reg result)))
@@ -537,7 +537,7 @@
(rule (lower (has_type $I128 (sextend x))) (rule (lower (has_type $I128 (sextend x)))
(let ( (let (
(lo Reg (put_in_reg_sext64 x)) (lo Reg (put_in_reg_sext64 x))
(hi Reg (asr64_imm lo (imm_shift_from_u8 63))) (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63)))
) )
(value_regs lo hi))) (value_regs lo hi)))
@@ -554,7 +554,7 @@
lane lane
(vector_size in) (vector_size in)
(size_from_ty $I64))) (size_from_ty $I64)))
(hi Reg (asr64_imm lo (imm_shift_from_u8 63))) (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63)))
) )
(value_regs lo hi))) (value_regs lo hi)))
@@ -566,7 +566,7 @@
(lo Reg (mov_from_vec (put_in_reg vec) (lo Reg (mov_from_vec (put_in_reg vec)
lane lane
(VectorSize.Size64x2))) (VectorSize.Size64x2)))
(hi Reg (asr64_imm lo (imm_shift_from_u8 63))) (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63)))
) )
(value_regs lo hi))) (value_regs lo hi)))
@@ -592,8 +592,8 @@
(x_regs ValueRegs (put_in_regs x)) (x_regs ValueRegs (put_in_regs x))
(x_lo Reg (value_regs_get x_regs 0)) (x_lo Reg (value_regs_get x_regs 0))
(x_hi Reg (value_regs_get x_regs 1)) (x_hi Reg (value_regs_get x_regs 1))
(new_lo Reg (orr_not64 (zero_reg) x_lo)) (new_lo Reg (orr_not $I64 (zero_reg) x_lo))
(new_hi Reg (orr_not64 (zero_reg) x_hi)) (new_hi Reg (orr_not $I64 (zero_reg) x_hi))
) )
(value_regs new_lo new_hi))) (value_regs new_lo new_hi)))
@@ -604,12 +604,12 @@
;;;; Rules for `band` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `band` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (band x y))) (rule (lower (has_type (fits_in_32 ty) (band x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.And32) ty x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.And) ty x y)))
(rule (lower (has_type $I64 (band x y))) (rule (lower (has_type $I64 (band x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.And64) $I64 x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.And) $I64 x y)))
(rule (lower (has_type $I128 (band x y))) (i128_alu_bitop (ALUOp.And64) x y)) (rule (lower (has_type $I128 (band x y))) (i128_alu_bitop (ALUOp.And) $I64 x y))
(rule (lower (has_type (vec128 ty) (band x y))) (rule (lower (has_type (vec128 ty) (band x y)))
(value_reg (and_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) (value_reg (and_vec (put_in_reg x) (put_in_reg y) (vector_size ty))))
@@ -617,12 +617,12 @@
;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (bor x y))) (rule (lower (has_type (fits_in_32 ty) (bor x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr32) ty x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr) ty x y)))
(rule (lower (has_type $I64 (bor x y))) (rule (lower (has_type $I64 (bor x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr64) $I64 x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr) $I64 x y)))
(rule (lower (has_type $I128 (bor x y))) (i128_alu_bitop (ALUOp.Orr64) x y)) (rule (lower (has_type $I128 (bor x y))) (i128_alu_bitop (ALUOp.Orr) $I64 x y))
(rule (lower (has_type (vec128 ty) (bor x y))) (rule (lower (has_type (vec128 ty) (bor x y)))
(value_reg (orr_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) (value_reg (orr_vec (put_in_reg x) (put_in_reg y) (vector_size ty))))
@@ -630,12 +630,12 @@
;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (bxor x y))) (rule (lower (has_type (fits_in_32 ty) (bxor x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor32) ty x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor) ty x y)))
(rule (lower (has_type $I64 (bxor x y))) (rule (lower (has_type $I64 (bxor x y)))
(value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor64) $I64 x y))) (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor) $I64 x y)))
(rule (lower (has_type $I128 (bxor x y))) (i128_alu_bitop (ALUOp.Eor64) x y)) (rule (lower (has_type $I128 (bxor x y))) (i128_alu_bitop (ALUOp.Eor) $I64 x y))
(rule (lower (has_type (vec128 ty) (bxor x y))) (rule (lower (has_type (vec128 ty) (bxor x y)))
(value_reg (eor_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) (value_reg (eor_vec (put_in_reg x) (put_in_reg y) (vector_size ty))))
@@ -643,12 +643,12 @@
;;;; Rules for `band_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `band_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (band_not x y))) (rule (lower (has_type (fits_in_32 ty) (band_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.AndNot32) ty x y))) (value_reg (alu_rs_imm_logic (ALUOp.AndNot) ty x y)))
(rule (lower (has_type $I64 (band_not x y))) (rule (lower (has_type $I64 (band_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.AndNot64) $I64 x y))) (value_reg (alu_rs_imm_logic (ALUOp.AndNot) $I64 x y)))
(rule (lower (has_type $I128 (band_not x y))) (i128_alu_bitop (ALUOp.AndNot64) x y)) (rule (lower (has_type $I128 (band_not x y))) (i128_alu_bitop (ALUOp.AndNot) $I64 x y))
(rule (lower (has_type (vec128 ty) (band_not x y))) (rule (lower (has_type (vec128 ty) (band_not x y)))
(value_reg (bic_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) (value_reg (bic_vec (put_in_reg x) (put_in_reg y) (vector_size ty))))
@@ -656,32 +656,32 @@
;;;; Rules for `bor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `bor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (bor_not x y))) (rule (lower (has_type (fits_in_32 ty) (bor_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.OrrNot32) ty x y))) (value_reg (alu_rs_imm_logic (ALUOp.OrrNot) ty x y)))
(rule (lower (has_type $I64 (bor_not x y))) (rule (lower (has_type $I64 (bor_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.OrrNot64) $I64 x y))) (value_reg (alu_rs_imm_logic (ALUOp.OrrNot) $I64 x y)))
(rule (lower (has_type $I128 (bor_not x y))) (i128_alu_bitop (ALUOp.OrrNot64) x y)) (rule (lower (has_type $I128 (bor_not x y))) (i128_alu_bitop (ALUOp.OrrNot) $I64 x y))
;;;; Rules for `bxor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `bxor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type (fits_in_32 ty) (bxor_not x y))) (rule (lower (has_type (fits_in_32 ty) (bxor_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.EorNot32) ty x y))) (value_reg (alu_rs_imm_logic (ALUOp.EorNot) $I32 x y)))
(rule (lower (has_type $I64 (bxor_not x y))) (rule (lower (has_type $I64 (bxor_not x y)))
(value_reg (alu_rs_imm_logic (ALUOp.EorNot64) $I64 x y))) (value_reg (alu_rs_imm_logic (ALUOp.EorNot) $I64 x y)))
(rule (lower (has_type $I128 (bxor_not x y))) (i128_alu_bitop (ALUOp.EorNot64) x y)) (rule (lower (has_type $I128 (bxor_not x y))) (i128_alu_bitop (ALUOp.EorNot) $I64 x y))
;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Shift for i8/i16/i32. ;; Shift for i8/i16/i32.
(rule (lower (has_type (fits_in_32 ty) (ishl x y))) (rule (lower (has_type (fits_in_32 ty) (ishl x y)))
(value_reg (do_shift (ALUOp.Lsl32) ty (put_in_reg x) y))) (value_reg (do_shift (ALUOp.Lsl) ty (put_in_reg x) y)))
;; Shift for i64. ;; Shift for i64.
(rule (lower (has_type $I64 (ishl x y))) (rule (lower (has_type $I64 (ishl x y)))
(value_reg (do_shift (ALUOp.Lsl64) $I64 (put_in_reg x) y))) (value_reg (do_shift (ALUOp.Lsl) $I64 (put_in_reg x) y)))
;; Shift for i128. ;; Shift for i128.
(rule (lower (has_type $I128 (ishl x y))) (rule (lower (has_type $I128 (ishl x y)))
@@ -701,15 +701,15 @@
(let ( (let (
(src_lo Reg (value_regs_get src 0)) (src_lo Reg (value_regs_get src 0))
(src_hi Reg (value_regs_get src 1)) (src_hi Reg (value_regs_get src 1))
(lo_lshift Reg (lsl64 src_lo amt)) (lo_lshift Reg (lsl $I64 src_lo amt))
(hi_lshift Reg (lsl64 src_hi amt)) (hi_lshift Reg (lsl $I64 src_hi amt))
(inv_amt Reg (orr_not32 (zero_reg) amt)) (inv_amt Reg (orr_not $I32 (zero_reg) amt))
(lo_rshift Reg (lsr64 (lsr64_imm src_lo (imm_shift_from_u8 1)) (lo_rshift Reg (lsr $I64 (lsr_imm $I64 src_lo (imm_shift_from_u8 1))
inv_amt)) inv_amt))
(maybe_hi Reg (orr64 hi_lshift lo_rshift)) (maybe_hi Reg (orr $I64 hi_lshift lo_rshift))
) )
(with_flags_2 (with_flags_2
(tst64_imm amt (u64_into_imm_logic $I64 64)) (tst_imm $I64 amt (u64_into_imm_logic $I64 64))
(csel (Cond.Ne) (zero_reg) lo_lshift) (csel (Cond.Ne) (zero_reg) lo_lshift)
(csel (Cond.Ne) lo_lshift maybe_hi)))) (csel (Cond.Ne) lo_lshift maybe_hi))))
@@ -741,16 +741,16 @@
(rule (do_shift op (fits_in_16 ty) x y) (rule (do_shift op (fits_in_16 ty) x y)
(let ( (let (
(shift_amt Reg (value_regs_get (put_in_regs y) 0)) (shift_amt Reg (value_regs_get (put_in_regs y) 0))
(masked_shift_amt Reg (and32_imm shift_amt (shift_mask ty))) (masked_shift_amt Reg (and_imm $I32 shift_amt (shift_mask ty)))
) )
(alu_rrr op x masked_shift_amt))) (alu_rrr op $I32 x masked_shift_amt)))
(decl shift_mask (Type) ImmLogic) (decl shift_mask (Type) ImmLogic)
(extern constructor shift_mask shift_mask) (extern constructor shift_mask shift_mask)
;; 32/64-bit shift base cases. ;; 32/64-bit shift base cases.
(rule (do_shift op $I32 x y) (alu_rrr op x (value_regs_get (put_in_regs y) 0))) (rule (do_shift op $I32 x y) (alu_rrr op $I32 x (value_regs_get (put_in_regs y) 0)))
(rule (do_shift op $I64 x y) (alu_rrr op x (value_regs_get (put_in_regs y) 0))) (rule (do_shift op $I64 x y) (alu_rrr op $I64 x (value_regs_get (put_in_regs y) 0)))
;; Special case for shifting by a constant value where the value can fit into an ;; Special case for shifting by a constant value where the value can fit into an
;; `ImmShift`. ;; `ImmShift`.
@@ -759,17 +759,17 @@
;; to ensure it's attempted first, otherwise the type-based filters on the ;; to ensure it's attempted first, otherwise the type-based filters on the
;; previous rules seem to take priority over this rule. ;; previous rules seem to take priority over this rule.
(rule 1 (do_shift op ty x (def_inst (iconst (imm_shift_from_imm64 <ty shift)))) (rule 1 (do_shift op ty x (def_inst (iconst (imm_shift_from_imm64 <ty shift))))
(alu_rr_imm_shift op x shift)) (alu_rr_imm_shift op ty x shift))
;;;; Rules for `ushr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `ushr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Shift for i8/i16/i32. ;; Shift for i8/i16/i32.
(rule (lower (has_type (fits_in_32 ty) (ushr x y))) (rule (lower (has_type (fits_in_32 ty) (ushr x y)))
(value_reg (do_shift (ALUOp.Lsr32) ty (put_in_reg_zext32 x) y))) (value_reg (do_shift (ALUOp.Lsr) ty (put_in_reg_zext32 x) y)))
;; Shift for i64. ;; Shift for i64.
(rule (lower (has_type $I64 (ushr x y))) (rule (lower (has_type $I64 (ushr x y)))
(value_reg (do_shift (ALUOp.Lsr64) $I64 (put_in_reg_zext64 x) y))) (value_reg (do_shift (ALUOp.Lsr) $I64 (put_in_reg_zext64 x) y)))
;; Shift for i128. ;; Shift for i128.
(rule (lower (has_type $I128 (ushr x y))) (rule (lower (has_type $I128 (ushr x y)))
@@ -779,7 +779,7 @@
(rule (lower (has_type (vec128 ty) (ushr x y))) (rule (lower (has_type (vec128 ty) (ushr x y)))
(let ( (let (
(size VectorSize (vector_size ty)) (size VectorSize (vector_size ty))
(shift Reg (vec_dup (sub32 (zero_reg) (put_in_reg y)) size)) (shift Reg (vec_dup (sub $I32 (zero_reg) (put_in_reg y)) size))
) )
(value_reg (ushl (put_in_reg x) shift size)))) (value_reg (ushl (put_in_reg x) shift size))))
@@ -797,16 +797,16 @@
(let ( (let (
(src_lo Reg (value_regs_get src 0)) (src_lo Reg (value_regs_get src 0))
(src_hi Reg (value_regs_get src 1)) (src_hi Reg (value_regs_get src 1))
(lo_rshift Reg (lsr64 src_lo amt)) (lo_rshift Reg (lsr $I64 src_lo amt))
(hi_rshift Reg (lsr64 src_hi amt)) (hi_rshift Reg (lsr $I64 src_hi amt))
(inv_amt Reg (orr_not32 (zero_reg) amt)) (inv_amt Reg (orr_not $I32 (zero_reg) amt))
(hi_lshift Reg (lsl64 (lsl64_imm src_hi (imm_shift_from_u8 1)) (hi_lshift Reg (lsl $I64 (lsl_imm $I64 src_hi (imm_shift_from_u8 1))
inv_amt)) inv_amt))
(maybe_lo Reg (orr64 lo_rshift hi_lshift)) (maybe_lo Reg (orr $I64 lo_rshift hi_lshift))
) )
(with_flags_2 (with_flags_2
(tst64_imm amt (u64_into_imm_logic $I64 64)) (tst_imm $I64 amt (u64_into_imm_logic $I64 64))
(csel (Cond.Ne) hi_rshift maybe_lo) (csel (Cond.Ne) hi_rshift maybe_lo)
(csel (Cond.Ne) (zero_reg) hi_rshift)))) (csel (Cond.Ne) (zero_reg) hi_rshift))))
@@ -814,11 +814,11 @@
;; Shift for i8/i16/i32. ;; Shift for i8/i16/i32.
(rule (lower (has_type (fits_in_32 ty) (sshr x y))) (rule (lower (has_type (fits_in_32 ty) (sshr x y)))
(value_reg (do_shift (ALUOp.Asr32) ty (put_in_reg_sext32 x) y))) (value_reg (do_shift (ALUOp.Asr) ty (put_in_reg_sext32 x) y)))
;; Shift for i64. ;; Shift for i64.
(rule (lower (has_type $I64 (sshr x y))) (rule (lower (has_type $I64 (sshr x y)))
(value_reg (do_shift (ALUOp.Asr64) $I64 (put_in_reg_sext64 x) y))) (value_reg (do_shift (ALUOp.Asr) $I64 (put_in_reg_sext64 x) y)))
;; Shift for i128. ;; Shift for i128.
(rule (lower (has_type $I128 (sshr x y))) (rule (lower (has_type $I128 (sshr x y)))
@@ -830,7 +830,7 @@
(rule (lower (has_type (vec128 ty) (sshr x y))) (rule (lower (has_type (vec128 ty) (sshr x y)))
(let ( (let (
(size VectorSize (vector_size ty)) (size VectorSize (vector_size ty))
(shift Reg (vec_dup (sub32 (zero_reg) (put_in_reg y)) size)) (shift Reg (vec_dup (sub $I32 (zero_reg) (put_in_reg y)) size))
) )
(value_reg (sshl (put_in_reg x) shift size)))) (value_reg (sshl (put_in_reg x) shift size))))
@@ -849,17 +849,17 @@
(let ( (let (
(src_lo Reg (value_regs_get src 0)) (src_lo Reg (value_regs_get src 0))
(src_hi Reg (value_regs_get src 1)) (src_hi Reg (value_regs_get src 1))
(lo_rshift Reg (lsr64 src_lo amt)) (lo_rshift Reg (lsr $I64 src_lo amt))
(hi_rshift Reg (asr64 src_hi amt)) (hi_rshift Reg (asr $I64 src_hi amt))
(inv_amt Reg (orr_not32 (zero_reg) amt)) (inv_amt Reg (orr_not $I32 (zero_reg) amt))
(hi_lshift Reg (lsl64 (lsl64_imm src_hi (imm_shift_from_u8 1)) (hi_lshift Reg (lsl $I64 (lsl_imm $I64 src_hi (imm_shift_from_u8 1))
inv_amt)) inv_amt))
(hi_sign Reg (asr64_imm src_hi (imm_shift_from_u8 63))) (hi_sign Reg (asr_imm $I64 src_hi (imm_shift_from_u8 63)))
(maybe_lo Reg (orr64 lo_rshift hi_lshift)) (maybe_lo Reg (orr $I64 lo_rshift hi_lshift))
) )
(with_flags_2 (with_flags_2
(tst64_imm amt (u64_into_imm_logic $I64 64)) (tst_imm $I64 amt (u64_into_imm_logic $I64 64))
(csel (Cond.Ne) hi_rshift maybe_lo) (csel (Cond.Ne) hi_rshift maybe_lo)
(csel (Cond.Ne) hi_sign hi_rshift)))) (csel (Cond.Ne) hi_sign hi_rshift))))
@@ -867,7 +867,7 @@
;; General 8/16-bit case. ;; General 8/16-bit case.
(rule (lower (has_type (fits_in_16 ty) (rotl x y))) (rule (lower (has_type (fits_in_16 ty) (rotl x y)))
(let ((neg_shift Reg (sub32 (zero_reg) (put_in_reg y)))) (let ((neg_shift Reg (sub $I32 (zero_reg) (put_in_reg y))))
(value_reg (small_rotr ty (put_in_reg_zext32 x) neg_shift)))) (value_reg (small_rotr ty (put_in_reg_zext32 x) neg_shift))))
;; Specialization for the 8/16-bit case when the rotation amount is an immediate. ;; Specialization for the 8/16-bit case when the rotation amount is an immediate.
@@ -884,21 +884,21 @@
;; General 32-bit case. ;; General 32-bit case.
(rule (lower (has_type $I32 (rotl x y))) (rule (lower (has_type $I32 (rotl x y)))
(let ((neg_shift Reg (sub32 (zero_reg) (put_in_reg y)))) (let ((neg_shift Reg (sub $I32 (zero_reg) (put_in_reg y))))
(value_reg (rotr32 (put_in_reg x) neg_shift)))) (value_reg (a64_rotr $I32 (put_in_reg x) neg_shift))))
;; General 64-bit case. ;; General 64-bit case.
(rule (lower (has_type $I64 (rotl x y))) (rule (lower (has_type $I64 (rotl x y)))
(let ((neg_shift Reg (sub64 (zero_reg) (put_in_reg y)))) (let ((neg_shift Reg (sub $I64 (zero_reg) (put_in_reg y))))
(value_reg (rotr64 (put_in_reg x) neg_shift)))) (value_reg (a64_rotr $I64 (put_in_reg x) neg_shift))))
;; Specialization for the 32-bit case when the rotation amount is an immediate. ;; Specialization for the 32-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I32 (rotl x (def_inst (iconst (imm_shift_from_imm64 <$I32 n)))))) (rule (lower (has_type $I32 (rotl x (def_inst (iconst (imm_shift_from_imm64 <$I32 n))))))
(value_reg (rotr32_imm (put_in_reg x) (negate_imm_shift $I32 n)))) (value_reg (a64_rotr_imm $I32 (put_in_reg x) (negate_imm_shift $I32 n))))
;; Specialization for the 64-bit case when the rotation amount is an immediate. ;; Specialization for the 64-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I64 (rotl x (def_inst (iconst (imm_shift_from_imm64 <$I64 n)))))) (rule (lower (has_type $I64 (rotl x (def_inst (iconst (imm_shift_from_imm64 <$I64 n))))))
(value_reg (rotr64_imm (put_in_reg x) (negate_imm_shift $I64 n)))) (value_reg (a64_rotr_imm $I64 (put_in_reg x) (negate_imm_shift $I64 n))))
(decl negate_imm_shift (Type ImmShift) ImmShift) (decl negate_imm_shift (Type ImmShift) ImmShift)
(extern constructor negate_imm_shift negate_imm_shift) (extern constructor negate_imm_shift negate_imm_shift)
@@ -910,13 +910,13 @@
(let ( (let (
(val ValueRegs (put_in_regs x)) (val ValueRegs (put_in_regs x))
(amt Reg (value_regs_get (put_in_regs y) 0)) (amt Reg (value_regs_get (put_in_regs y) 0))
(neg_amt Reg (sub64 (imm $I64 128) amt)) (neg_amt Reg (sub $I64 (imm $I64 128) amt))
(lshift ValueRegs (lower_shl128 val amt)) (lshift ValueRegs (lower_shl128 val amt))
(rshift ValueRegs (lower_ushr128 val neg_amt)) (rshift ValueRegs (lower_ushr128 val neg_amt))
) )
(value_regs (value_regs
(orr64 (value_regs_get lshift 0) (value_regs_get rshift 0)) (orr $I64 (value_regs_get lshift 0) (value_regs_get rshift 0))
(orr64 (value_regs_get lshift 1) (value_regs_get rshift 1))))) (orr $I64 (value_regs_get lshift 1) (value_regs_get rshift 1)))))
;;;; Rules for `rotr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `rotr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -926,11 +926,11 @@
;; General 32-bit case. ;; General 32-bit case.
(rule (lower (has_type $I32 (rotr x y))) (rule (lower (has_type $I32 (rotr x y)))
(value_reg (rotr32 (put_in_reg x) (put_in_reg y)))) (value_reg (a64_rotr $I32 (put_in_reg x) (put_in_reg y))))
;; General 64-bit case. ;; General 64-bit case.
(rule (lower (has_type $I64 (rotr x y))) (rule (lower (has_type $I64 (rotr x y)))
(value_reg (rotr64 (put_in_reg x) (put_in_reg y)))) (value_reg (a64_rotr $I64 (put_in_reg x) (put_in_reg y))))
;; Specialization for the 8/16-bit case when the rotation amount is an immediate. ;; Specialization for the 8/16-bit case when the rotation amount is an immediate.
(rule (lower (has_type (fits_in_16 ty) (rotr x (def_inst (iconst (imm_shift_from_imm64 <ty n)))))) (rule (lower (has_type (fits_in_16 ty) (rotr x (def_inst (iconst (imm_shift_from_imm64 <ty n))))))
@@ -938,11 +938,11 @@
;; Specialization for the 32-bit case when the rotation amount is an immediate. ;; Specialization for the 32-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I32 (rotr x (def_inst (iconst (imm_shift_from_imm64 <$I32 n)))))) (rule (lower (has_type $I32 (rotr x (def_inst (iconst (imm_shift_from_imm64 <$I32 n))))))
(value_reg (rotr32_imm (put_in_reg x) n))) (value_reg (a64_rotr_imm $I32 (put_in_reg x) n)))
;; Specialization for the 64-bit case when the rotation amount is an immediate. ;; Specialization for the 64-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I64 (rotr x (def_inst (iconst (imm_shift_from_imm64 <$I64 n)))))) (rule (lower (has_type $I64 (rotr x (def_inst (iconst (imm_shift_from_imm64 <$I64 n))))))
(value_reg (rotr64_imm (put_in_reg x) n))) (value_reg (a64_rotr_imm $I64 (put_in_reg x) n)))
;; For a < 32-bit rotate-right, we synthesize this as: ;; For a < 32-bit rotate-right, we synthesize this as:
;; ;;
@@ -959,13 +959,13 @@
(decl small_rotr (Type Reg Reg) Reg) (decl small_rotr (Type Reg Reg) Reg)
(rule (small_rotr ty val amt) (rule (small_rotr ty val amt)
(let ( (let (
(masked_amt Reg (and32_imm amt (rotr_mask ty))) (masked_amt Reg (and_imm $I32 amt (rotr_mask ty)))
(tmp_sub Reg (sub32_imm masked_amt (u8_into_imm12 (ty_bits ty)))) (tmp_sub Reg (sub_imm $I32 masked_amt (u8_into_imm12 (ty_bits ty))))
(neg_amt Reg (sub32 (zero_reg) tmp_sub)) (neg_amt Reg (sub $I32 (zero_reg) tmp_sub))
(val_rshift Reg (lsr32 val masked_amt)) (val_rshift Reg (lsr $I32 val masked_amt))
(val_lshift Reg (lsl32 val neg_amt)) (val_lshift Reg (lsl $I32 val neg_amt))
) )
(orr32 val_lshift val_rshift))) (orr $I32 val_lshift val_rshift)))
(decl rotr_mask (Type) ImmLogic) (decl rotr_mask (Type) ImmLogic)
(extern constructor rotr_mask rotr_mask) (extern constructor rotr_mask rotr_mask)
@@ -982,10 +982,10 @@
(decl small_rotr_imm (Type Reg ImmShift) Reg) (decl small_rotr_imm (Type Reg ImmShift) Reg)
(rule (small_rotr_imm ty val amt) (rule (small_rotr_imm ty val amt)
(let ( (let (
(val_rshift Reg (lsr32_imm val amt)) (val_rshift Reg (lsr_imm $I32 val amt))
(val_lshift Reg (lsl32_imm val (rotr_opposite_amount ty amt))) (val_lshift Reg (lsl_imm $I32 val (rotr_opposite_amount ty amt)))
) )
(orr32 val_lshift val_rshift))) (orr $I32 val_lshift val_rshift)))
(decl rotr_opposite_amount (Type ImmShift) ImmShift) (decl rotr_opposite_amount (Type ImmShift) ImmShift)
(extern constructor rotr_opposite_amount rotr_opposite_amount) (extern constructor rotr_opposite_amount rotr_opposite_amount)
@@ -997,11 +997,11 @@
(let ( (let (
(val ValueRegs (put_in_regs x)) (val ValueRegs (put_in_regs x))
(amt Reg (value_regs_get (put_in_regs y) 0)) (amt Reg (value_regs_get (put_in_regs y) 0))
(neg_amt Reg (sub64 (imm $I64 128) amt)) (neg_amt Reg (sub $I64 (imm $I64 128) amt))
(rshift ValueRegs (lower_ushr128 val amt)) (rshift ValueRegs (lower_ushr128 val amt))
(lshift ValueRegs (lower_shl128 val neg_amt)) (lshift ValueRegs (lower_shl128 val neg_amt))
(hi Reg (orr64 (value_regs_get rshift 1) (value_regs_get lshift 1))) (hi Reg (orr $I64 (value_regs_get rshift 1) (value_regs_get lshift 1)))
(lo Reg (orr64 (value_regs_get rshift 0) (value_regs_get lshift 0))) (lo Reg (orr $I64 (value_regs_get rshift 0) (value_regs_get lshift 0)))
) )
(value_regs lo hi))) (value_regs lo hi)))
@@ -1011,13 +1011,13 @@
;; the reversed result in the highest 8 bits, so we need to shift them down into ;; the reversed result in the highest 8 bits, so we need to shift them down into
;; place. ;; place.
(rule (lower (has_type $I8 (bitrev x))) (rule (lower (has_type $I8 (bitrev x)))
(value_reg (lsr32_imm (rbit32 (put_in_reg x)) (imm_shift_from_u8 24)))) (value_reg (lsr_imm $I32 (rbit32 (put_in_reg x)) (imm_shift_from_u8 24))))
;; Reversing an 16-bit value with a 32-bit bitrev instruction will place ;; Reversing an 16-bit value with a 32-bit bitrev instruction will place
;; the reversed result in the highest 16 bits, so we need to shift them down into ;; the reversed result in the highest 16 bits, so we need to shift them down into
;; place. ;; place.
(rule (lower (has_type $I16 (bitrev x))) (rule (lower (has_type $I16 (bitrev x)))
(value_reg (lsr32_imm (rbit32 (put_in_reg x)) (imm_shift_from_u8 16)))) (value_reg (lsr_imm $I32 (rbit32 (put_in_reg x)) (imm_shift_from_u8 16))))
(rule (lower (has_type $I32 (bitrev x))) (rule (lower (has_type $I32 (bitrev x)))
(value_reg (rbit32 (put_in_reg x)))) (value_reg (rbit32 (put_in_reg x))))
@@ -1036,10 +1036,10 @@
;;;; Rules for `clz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `clz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type $I8 (clz x))) (rule (lower (has_type $I8 (clz x)))
(value_reg (sub32_imm (clz32 (put_in_reg_zext32 x)) (u8_into_imm12 24)))) (value_reg (sub_imm $I32 (clz32 (put_in_reg_zext32 x)) (u8_into_imm12 24))))
(rule (lower (has_type $I16 (clz x))) (rule (lower (has_type $I16 (clz x)))
(value_reg (sub32_imm (clz32 (put_in_reg_zext32 x)) (u8_into_imm12 16)))) (value_reg (sub_imm $I32 (clz32 (put_in_reg_zext32 x)) (u8_into_imm12 16))))
(rule (lower (has_type $I32 (clz x))) (rule (lower (has_type $I32 (clz x)))
(value_reg (clz32 (put_in_reg x)))) (value_reg (clz32 (put_in_reg x))))
@@ -1060,7 +1060,7 @@
(let ( (let (
(hi_clz Reg (clz64 (value_regs_get val 1))) (hi_clz Reg (clz64 (value_regs_get val 1)))
(lo_clz Reg (clz64 (value_regs_get val 0))) (lo_clz Reg (clz64 (value_regs_get val 0)))
(tmp Reg (lsr64_imm hi_clz (imm_shift_from_u8 6))) (tmp Reg (lsr_imm $I64 hi_clz (imm_shift_from_u8 6)))
) )
(value_regs (madd64 lo_clz tmp hi_clz) (imm $I64 0)))) (value_regs (madd64 lo_clz tmp hi_clz) (imm $I64 0))))
@@ -1071,10 +1071,10 @@
;; leading zeros of the reversed value. ;; leading zeros of the reversed value.
(rule (lower (has_type $I8 (ctz x))) (rule (lower (has_type $I8 (ctz x)))
(value_reg (clz32 (orr32_imm (rbit32 (put_in_reg x)) (u64_into_imm_logic $I32 0x800000))))) (value_reg (clz32 (orr_imm $I32 (rbit32 (put_in_reg x)) (u64_into_imm_logic $I32 0x800000)))))
(rule (lower (has_type $I16 (ctz x))) (rule (lower (has_type $I16 (ctz x)))
(value_reg (clz32 (orr32_imm (rbit32 (put_in_reg x)) (u64_into_imm_logic $I32 0x8000))))) (value_reg (clz32 (orr_imm $I32 (rbit32 (put_in_reg x)) (u64_into_imm_logic $I32 0x8000)))))
(rule (lower (has_type $I32 (ctz x))) (rule (lower (has_type $I32 (ctz x)))
(value_reg (clz32 (rbit32 (put_in_reg x))))) (value_reg (clz32 (rbit32 (put_in_reg x)))))
@@ -1093,10 +1093,10 @@
;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (has_type $I8 (cls x))) (rule (lower (has_type $I8 (cls x)))
(value_reg (sub32_imm (cls32 (put_in_reg_zext32 x)) (u8_into_imm12 24)))) (value_reg (sub_imm $I32 (cls32 (put_in_reg_zext32 x)) (u8_into_imm12 24))))
(rule (lower (has_type $I16 (cls x))) (rule (lower (has_type $I16 (cls x)))
(value_reg (sub32_imm (cls32 (put_in_reg_zext32 x)) (u8_into_imm12 16)))) (value_reg (sub_imm $I32 (cls32 (put_in_reg_zext32 x)) (u8_into_imm12 16))))
(rule (lower (has_type $I32 (cls x))) (rule (lower (has_type $I32 (cls x)))
(value_reg (cls32 (put_in_reg x)))) (value_reg (cls32 (put_in_reg x))))
@@ -1120,15 +1120,15 @@
(hi Reg (value_regs_get val 1)) (hi Reg (value_regs_get val 1))
(lo_cls Reg (cls64 lo)) (lo_cls Reg (cls64 lo))
(hi_cls Reg (cls64 hi)) (hi_cls Reg (cls64 hi))
(sign_eq_eon Reg (eon64 hi lo)) (sign_eq_eon Reg (eon $I64 hi lo))
(sign_eq Reg (lsr64_imm sign_eq_eon (imm_shift_from_u8 63))) (sign_eq Reg (lsr_imm $I64 sign_eq_eon (imm_shift_from_u8 63)))
(lo_sign_bits Reg (madd64 lo_cls sign_eq sign_eq)) (lo_sign_bits Reg (madd64 lo_cls sign_eq sign_eq))
(maybe_lo Reg (with_flags_1 (maybe_lo Reg (with_flags_1
(cmp64_imm hi_cls (u8_into_imm12 63)) (cmp64_imm hi_cls (u8_into_imm12 63))
(csel (Cond.Eq) lo_sign_bits (zero_reg)) (csel (Cond.Eq) lo_sign_bits (zero_reg))
)) ))
) )
(value_regs (add64 maybe_lo hi_cls) (imm $I64 0)))) (value_regs (add $I64 maybe_lo hi_cls) (imm $I64 0))))
;;;; Rules for `popcnt` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `popcnt` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

View File

@@ -454,22 +454,32 @@ pub(crate) fn put_input_in_rse_imm12<C: LowerCtx<I = Inst>>(
//============================================================================ //============================================================================
// ALU instruction constructors. // ALU instruction constructors.
pub(crate) fn alu_inst_imm12(op: ALUOp, rd: Writable<Reg>, rn: Reg, rm: ResultRSEImm12) -> Inst { pub(crate) fn alu_inst_imm12(
op: ALUOp,
ty: Type,
rd: Writable<Reg>,
rn: Reg,
rm: ResultRSEImm12,
) -> Inst {
let size = OperandSize::from_ty(ty);
match rm { match rm {
ResultRSEImm12::Imm12(imm12) => Inst::AluRRImm12 { ResultRSEImm12::Imm12(imm12) => Inst::AluRRImm12 {
alu_op: op, alu_op: op,
size,
rd, rd,
rn, rn,
imm12, imm12,
}, },
ResultRSEImm12::Reg(rm) => Inst::AluRRR { ResultRSEImm12::Reg(rm) => Inst::AluRRR {
alu_op: op, alu_op: op,
size,
rd, rd,
rn, rn,
rm, rm,
}, },
ResultRSEImm12::RegShift(rm, shiftop) => Inst::AluRRRShift { ResultRSEImm12::RegShift(rm, shiftop) => Inst::AluRRRShift {
alu_op: op, alu_op: op,
size,
rd, rd,
rn, rn,
rm, rm,
@@ -477,6 +487,7 @@ pub(crate) fn alu_inst_imm12(op: ALUOp, rd: Writable<Reg>, rn: Reg, rm: ResultRS
}, },
ResultRSEImm12::RegExtend(rm, extendop) => Inst::AluRRRExtend { ResultRSEImm12::RegExtend(rm, extendop) => Inst::AluRRRExtend {
alu_op: op, alu_op: op,
size,
rd, rd,
rn, rn,
rm, rm,
@@ -772,7 +783,8 @@ fn lower_add_addends<C: LowerCtx<I = Inst>>(
reg reg
}; };
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd, rd,
rn: rd.to_reg(), rn: rd.to_reg(),
rm: reg, rm: reg,
@@ -781,7 +793,8 @@ fn lower_add_addends<C: LowerCtx<I = Inst>>(
for (reg, extendop) in addends32 { for (reg, extendop) in addends32 {
assert!(reg != stack_reg()); assert!(reg != stack_reg());
ctx.emit(Inst::AluRRRExtend { ctx.emit(Inst::AluRRRExtend {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd, rd,
rn: rd.to_reg(), rn: rd.to_reg(),
rm: reg, rm: reg,
@@ -797,14 +810,16 @@ fn lower_add_immediate<C: LowerCtx<I = Inst>>(ctx: &mut C, dst: Writable<Reg>, s
// Otherwise, lower the constant first then add. // Otherwise, lower the constant first then add.
if let Some(imm12) = Imm12::maybe_from_u64(imm as u64) { if let Some(imm12) = Imm12::maybe_from_u64(imm as u64) {
ctx.emit(Inst::AluRRImm12 { ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: dst, rd: dst,
rn: src, rn: src,
imm12, imm12,
}); });
} else if let Some(imm12) = Imm12::maybe_from_u64(imm.wrapping_neg() as u64) { } else if let Some(imm12) = Imm12::maybe_from_u64(imm.wrapping_neg() as u64) {
ctx.emit(Inst::AluRRImm12 { ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::Sub64, alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: dst, rd: dst,
rn: src, rn: src,
imm12, imm12,
@@ -812,7 +827,8 @@ fn lower_add_immediate<C: LowerCtx<I = Inst>>(ctx: &mut C, dst: Writable<Reg>, s
} else { } else {
lower_constant_u64(ctx, dst, imm as u64); lower_constant_u64(ctx, dst, imm as u64);
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Add64, alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: dst, rd: dst,
rn: dst.to_reg(), rn: dst.to_reg(),
rm: src, rm: src,
@@ -1250,19 +1266,22 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
// cset dst, {eq, ne} // cset dst, {eq, ne}
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Eor64, alu_op: ALUOp::Eor,
size: OperandSize::Size64,
rd: tmp1, rd: tmp1,
rn: lhs.regs()[0], rn: lhs.regs()[0],
rm: rhs.regs()[0], rm: rhs.regs()[0],
}); });
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Eor64, alu_op: ALUOp::Eor,
size: OperandSize::Size64,
rd: tmp2, rd: tmp2,
rn: lhs.regs()[1], rn: lhs.regs()[1],
rm: rhs.regs()[1], rm: rhs.regs()[1],
}); });
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::AddS64, alu_op: ALUOp::AddS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: tmp1.to_reg(), rn: tmp1.to_reg(),
rm: tmp2.to_reg(), rm: tmp2.to_reg(),
@@ -1277,13 +1296,15 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
// cset dst, {vs, vc} // cset dst, {vs, vc}
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::AddS64, alu_op: ALUOp::AddS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: lhs.regs()[0], rn: lhs.regs()[0],
rm: rhs.regs()[0], rm: rhs.regs()[0],
}); });
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::AdcS64, alu_op: ALUOp::AdcS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: lhs.regs()[1], rn: lhs.regs()[1],
rm: rhs.regs()[1], rm: rhs.regs()[1],
@@ -1300,14 +1321,16 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
let unsigned_cond = lower_condcode(condcode.unsigned()); let unsigned_cond = lower_condcode(condcode.unsigned());
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: lhs.regs()[0], rn: lhs.regs()[0],
rm: rhs.regs()[0], rm: rhs.regs()[0],
}); });
materialize_bool_result(ctx, insn, tmp1, unsigned_cond); materialize_bool_result(ctx, insn, tmp1, unsigned_cond);
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: lhs.regs()[1], rn: lhs.regs()[1],
rm: rhs.regs()[1], rm: rhs.regs()[1],
@@ -1345,7 +1368,8 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
}; };
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn, rn,
rm, rm,
@@ -1391,7 +1415,7 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
ExtendOp::SXTH ExtendOp::SXTH
}; };
let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap(); let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
ctx.emit(alu_inst_imm12(ALUOp::Sub32, tmp1, rn, rm)); ctx.emit(alu_inst_imm12(ALUOp::Sub, I32, tmp1, rn, rm));
let out_cond = match condcode { let out_cond = match condcode {
IntCC::Overflow => Cond::Ne, IntCC::Overflow => Cond::Ne,
@@ -1407,8 +1431,7 @@ pub(crate) fn lower_icmp<C: LowerCtx<I = Inst>>(
(cond, rn, rm) (cond, rn, rm)
}; };
let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64); ctx.emit(alu_inst_imm12(ALUOp::SubS, ty, writable_zero_reg(), rn, rm));
ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, rm));
cond cond
}; };

View File

@@ -1,4 +1,4 @@
src/clif.isle 9ea75a6f790b5c03 src/clif.isle 9ea75a6f790b5c03
src/prelude.isle 73285cd431346d53 src/prelude.isle 73285cd431346d53
src/isa/aarch64/inst.isle dafd813ba278ce19 src/isa/aarch64/inst.isle 4c176462894836e5
src/isa/aarch64/lower.isle 2d2e1e076a0c8a23 src/isa/aarch64/lower.isle aff657984bf30686

File diff suppressed because it is too large Load Diff

View File

@@ -410,16 +410,17 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
lower_fcmp_or_ffcmp_to_flags(ctx, fcmp_insn); lower_fcmp_or_ffcmp_to_flags(ctx, fcmp_insn);
cond cond
} else { } else {
let (cmp_op, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 { let (size, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 {
(ALUOp::SubS64, NarrowValueMode::ZeroExtend64) (OperandSize::Size64, NarrowValueMode::ZeroExtend64)
} else { } else {
(ALUOp::SubS32, NarrowValueMode::ZeroExtend32) (OperandSize::Size32, NarrowValueMode::ZeroExtend32)
}; };
let rcond = put_input_in_reg(ctx, inputs[0], narrow_mode); let rcond = put_input_in_reg(ctx, inputs[0], narrow_mode);
// cmp rcond, #0 // cmp rcond, #0
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: cmp_op, alu_op: ALUOp::SubS,
size,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: rcond, rn: rcond,
rm: zero_reg(), rm: zero_reg(),
@@ -507,21 +508,24 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
// AND rTmp, rn, rcond // AND rTmp, rn, rcond
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::And64, alu_op: ALUOp::And,
size: OperandSize::Size64,
rd: tmp, rd: tmp,
rn, rn,
rm: rcond, rm: rcond,
}); });
// BIC rd, rm, rcond // BIC rd, rm, rcond
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::AndNot64, alu_op: ALUOp::AndNot,
size: OperandSize::Size64,
rd, rd,
rn: rm, rn: rm,
rm: rcond, rm: rcond,
}); });
// ORR rd, rd, rTmp // ORR rd, rd, rTmp
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd, rd,
rn: rd.to_reg(), rn: rd.to_reg(),
rm: tmp.to_reg(), rm: tmp.to_reg(),
@@ -571,16 +575,22 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let (alu_op, const_value) = match op { let (alu_op, const_value) = match op {
Opcode::IsNull => { Opcode::IsNull => {
// cmp rn, #0 // cmp rn, #0
(choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64), 0) (ALUOp::SubS, 0)
} }
Opcode::IsInvalid => { Opcode::IsInvalid => {
// cmn rn, #1 // cmn rn, #1
(choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64), 1) (ALUOp::AddS, 1)
} }
_ => unreachable!(), _ => unreachable!(),
}; };
let const_value = ResultRSEImm12::Imm12(Imm12::maybe_from_u64(const_value).unwrap()); let const_value = ResultRSEImm12::Imm12(Imm12::maybe_from_u64(const_value).unwrap());
ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, const_value)); ctx.emit(alu_inst_imm12(
alu_op,
ty,
writable_zero_reg(),
rn,
const_value,
));
materialize_bool_result(ctx, insn, rd, Cond::Eq); materialize_bool_result(ctx, insn, rd, Cond::Eq);
} }
@@ -655,7 +665,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let output = get_output_reg(ctx, outputs[0]); let output = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::AluRRImmLogic { ctx.emit(Inst::AluRRImmLogic {
alu_op: ALUOp::And32, alu_op: ALUOp::And,
size: OperandSize::Size32,
rd: output.regs()[0], rd: output.regs()[0],
rn: input.regs()[0], rn: input.regs()[0],
imml: ImmLogic::maybe_from_u64(1, I32).unwrap(), imml: ImmLogic::maybe_from_u64(1, I32).unwrap(),
@@ -1164,7 +1175,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}); });
ctx.emit(Inst::AluRRImm12 { ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::SubS64, alu_op: ALUOp::SubS,
size: OperandSize::Size64,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: rd.to_reg(), rn: rd.to_reg(),
imm12: Imm12::zero(), imm12: Imm12::zero(),
@@ -1267,7 +1279,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
size: VectorSize::Size64x2, size: VectorSize::Size64x2,
}); });
ctx.emit(Inst::AluRRImmShift { ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsl64, alu_op: ALUOp::Lsl,
size: OperandSize::Size64,
rd: tmp_r0, rd: tmp_r0,
rn: tmp_r0.to_reg(), rn: tmp_r0.to_reg(),
immshift: ImmShift { imm: 4 }, immshift: ImmShift { imm: 4 },
@@ -1322,7 +1335,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
size: VectorSize::Size64x2, size: VectorSize::Size64x2,
}); });
ctx.emit(Inst::AluRRImmShift { ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsl64, alu_op: ALUOp::Lsl,
size: OperandSize::Size64,
rd: tmp_r0, rd: tmp_r0,
rn: tmp_r0.to_reg(), rn: tmp_r0.to_reg(),
immshift: ImmShift { imm: 2 }, immshift: ImmShift { imm: 2 },
@@ -1372,19 +1386,22 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
size: VectorSize::Size64x2, size: VectorSize::Size64x2,
}); });
ctx.emit(Inst::AluRRImmShift { ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsr64, alu_op: ALUOp::Lsr,
size: OperandSize::Size64,
rd: dst_r, rd: dst_r,
rn: dst_r.to_reg(), rn: dst_r.to_reg(),
immshift: ImmShift::maybe_from_u64(63).unwrap(), immshift: ImmShift::maybe_from_u64(63).unwrap(),
}); });
ctx.emit(Inst::AluRRImmShift { ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsr64, alu_op: ALUOp::Lsr,
size: OperandSize::Size64,
rd: tmp_r0, rd: tmp_r0,
rn: tmp_r0.to_reg(), rn: tmp_r0.to_reg(),
immshift: ImmShift::maybe_from_u64(63).unwrap(), immshift: ImmShift::maybe_from_u64(63).unwrap(),
}); });
ctx.emit(Inst::AluRRRShift { ctx.emit(Inst::AluRRRShift {
alu_op: ALUOp::Add32, alu_op: ALUOp::Add,
size: OperandSize::Size32,
rd: dst_r, rd: dst_r,
rn: dst_r.to_reg(), rn: dst_r.to_reg(),
rm: tmp_r0.to_reg(), rm: tmp_r0.to_reg(),
@@ -2255,8 +2272,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None); let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
let ty = ty.unwrap(); let ty = ty.unwrap();
let alu_op = choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64); ctx.emit(alu_inst_imm12(ALUOp::AddS, ty, rd, rn, rm));
ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm));
} }
Opcode::IaddImm Opcode::IaddImm
@@ -2572,7 +2588,8 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
let tmp = ctx.alloc_tmp(I64).only_reg().unwrap(); let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
let input = put_input_in_regs(ctx, flag_input); let input = put_input_in_regs(ctx, flag_input);
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Orr64, alu_op: ALUOp::Orr,
size: OperandSize::Size64,
rd: tmp, rd: tmp,
rn: input.regs()[0], rn: input.regs()[0],
rm: input.regs()[1], rm: input.regs()[1],
@@ -2710,7 +2727,8 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
// branch to default target below. // branch to default target below.
if let Some(imm12) = Imm12::maybe_from_u64(jt_size as u64) { if let Some(imm12) = Imm12::maybe_from_u64(jt_size as u64) {
ctx.emit(Inst::AluRRImm12 { ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: ridx, rn: ridx,
imm12, imm12,
@@ -2718,7 +2736,8 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
} else { } else {
lower_constant_u64(ctx, rtmp1, jt_size as u64); lower_constant_u64(ctx, rtmp1, jt_size as u64);
ctx.emit(Inst::AluRRR { ctx.emit(Inst::AluRRR {
alu_op: ALUOp::SubS32, alu_op: ALUOp::SubS,
size: OperandSize::Size32,
rd: writable_zero_reg(), rd: writable_zero_reg(),
rn: ridx, rn: ridx,
rm: rtmp1.to_reg(), rm: rtmp1.to_reg(),