diff --git a/cranelift/codegen/src/isa/aarch64/abi.rs b/cranelift/codegen/src/isa/aarch64/abi.rs
index 97ad2abd95..3fbfc83564 100644
--- a/cranelift/codegen/src/isa/aarch64/abi.rs
+++ b/cranelift/codegen/src/isa/aarch64/abi.rs
@@ -514,7 +514,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
         let mut insts = SmallVec::new();
         if let Some(imm12) = Imm12::maybe_from_u64(imm) {
             insts.push(Inst::AluRRImm12 {
-                alu_op: ALUOp::Add64,
+                alu_op: ALUOp::Add,
+                size: OperandSize::Size64,
                 rd: into_reg,
                 rn: from_reg,
                 imm12,
@@ -524,7 +525,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
             assert_ne!(scratch2.to_reg(), from_reg);
             insts.extend(Inst::load_constant(scratch2, imm.into()));
             insts.push(Inst::AluRRRExtend {
-                alu_op: ALUOp::Add64,
+                alu_op: ALUOp::Add,
+                size: OperandSize::Size64,
                 rd: into_reg,
                 rn: from_reg,
                 rm: scratch2.to_reg(),
@@ -537,7 +539,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
     fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
         let mut insts = SmallVec::new();
         insts.push(Inst::AluRRRExtend {
-            alu_op: ALUOp::SubS64,
+            alu_op: ALUOp::SubS,
+            size: OperandSize::Size64,
             rd: writable_zero_reg(),
             rn: stack_reg(),
             rm: limit_reg,
@@ -586,12 +589,13 @@ impl ABIMachineSpec for AArch64MachineDeps {
             (-amount as u64, true)
         };

-        let alu_op = if is_sub { ALUOp::Sub64 } else { ALUOp::Add64 };
+        let alu_op = if is_sub { ALUOp::Sub } else { ALUOp::Add };

         let mut ret = SmallVec::new();
         if let Some(imm12) = Imm12::maybe_from_u64(amount) {
             let adj_inst = Inst::AluRRImm12 {
                 alu_op,
+                size: OperandSize::Size64,
                 rd: writable_stack_reg(),
                 rn: stack_reg(),
                 imm12,
@@ -602,6 +606,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
             let const_inst = Inst::load_constant(tmp, amount);
             let adj_inst = Inst::AluRRRExtend {
                 alu_op,
+                size: OperandSize::Size64,
                 rd: writable_stack_reg(),
                 rn: stack_reg(),
                 rm: tmp.to_reg(),
@@ -659,7 +664,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
         // mov fp (x29), sp. This uses the ADDI rd, rs, 0 form of `MOV` because
        // the usual encoding (`ORR`) does not work with SP.
         insts.push(Inst::AluRRImm12 {
-            alu_op: ALUOp::Add64,
+            alu_op: ALUOp::Add,
+            size: OperandSize::Size64,
             rd: writable_fp_reg(),
             rn: stack_reg(),
             imm12: Imm12 {
diff --git a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle
index 778098d811..b9cbda583c 100644
--- a/cranelift/codegen/src/isa/aarch64/inst.isle
+++ b/cranelift/codegen/src/isa/aarch64/inst.isle
@@ -10,6 +10,7 @@
   ;; An ALU operation with two register sources and a register destination.
   (AluRRR
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (rm Reg))
@@ -26,6 +27,7 @@
   ;; destination.
   (AluRRImm12
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (imm12 Imm12))
@@ -33,6 +35,7 @@
   ;; An ALU operation with a register source and an immediate-logic source, and a register destination.
   (AluRRImmLogic
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (imml ImmLogic))
@@ -40,6 +43,7 @@
   ;; An ALU operation with a register source and an immediate-shiftamt source, and a register destination.
   (AluRRImmShift
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (immshift ImmShift))
@@ -48,6 +52,7 @@
   ;; destination.
   (AluRRRShift
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (rm Reg)
@@ -57,6 +62,7 @@
   ;; shifted, and a register destination.
   (AluRRRExtend
     (alu_op ALUOp)
+    (size OperandSize)
     (rd WritableReg)
     (rn Reg)
     (rm Reg)
@@ -788,62 +794,39 @@
 ;; below (see `Inst`) in any combination.
 (type ALUOp (enum
-  (Add32)
-  (Add64)
-  (Sub32)
-  (Sub64)
-  (Orr32)
-  (Orr64)
-  (OrrNot32)
-  (OrrNot64)
-  (And32)
-  (And64)
-  (AndS32)
-  (AndS64)
-  (AndNot32)
-  (AndNot64)
+  (Add)
+  (Sub)
+  (Orr)
+  (OrrNot)
+  (And)
+  (AndS)
+  (AndNot)
   ;; XOR (AArch64 calls this "EOR")
-  (Eor32)
-  ;; XOR (AArch64 calls this "EOR")
-  (Eor64)
+  (Eor)
   ;; XNOR (AArch64 calls this "EOR-NOT")
-  (EorNot32)
-  ;; XNOR (AArch64 calls this "EOR-NOT")
-  (EorNot64)
+  (EorNot)
   ;; Add, setting flags
-  (AddS32)
-  ;; Add, setting flags
-  (AddS64)
+  (AddS)
   ;; Sub, setting flags
-  (SubS32)
-  ;; Sub, setting flags
-  (SubS64)
+  (SubS)
   ;; Signed multiply, high-word result
   (SMulH)
   ;; Unsigned multiply, high-word result
   (UMulH)
-  (SDiv64)
-  (UDiv64)
-  (RotR32)
-  (RotR64)
-  (Lsr32)
-  (Lsr64)
-  (Asr32)
-  (Asr64)
-  (Lsl32)
-  (Lsl64)
+  (SDiv)
+  (UDiv)
+  (RotR)
+  (Lsr)
+  (Asr)
+  (Lsl)
   ;; Add with carry
-  (Adc32)
-  (Adc64)
+  (Adc)
   ;; Add with carry, settings flags
-  (AdcS32)
-  (AdcS64)
+  (AdcS)
   ;; Subtract with carry
-  (Sbc32)
-  (Sbc64)
+  (Sbc)
   ;; Subtract with carry, settings flags
-  (SbcS32)
-  (SbcS64)
+  (SbcS)
 ))

 ;; An ALU operation with three arguments.
@@ -910,6 +893,11 @@
       (enum Size32
             Size64))

+;; Helper for calculating the `OperandSize` corresponding to a type
+(decl operand_size (Type) OperandSize)
+(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32))
+(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64))
+
 (type ScalarSize extern
       (enum Size8
             Size16
@@ -1388,24 +1376,24 @@
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.AluRRImmLogic` instructions.
-(decl alu_rr_imm_logic (ALUOp Reg ImmLogic) Reg)
-(rule (alu_rr_imm_logic op src imm)
+(decl alu_rr_imm_logic (ALUOp Type Reg ImmLogic) Reg)
+(rule (alu_rr_imm_logic op ty src imm)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.AluRRImmLogic op dst src imm))))
+            (_ Unit (emit (MInst.AluRRImmLogic op (operand_size ty) dst src imm))))
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.AluRRImmShift` instructions.
-(decl alu_rr_imm_shift (ALUOp Reg ImmShift) Reg)
-(rule (alu_rr_imm_shift op src imm)
+(decl alu_rr_imm_shift (ALUOp Type Reg ImmShift) Reg)
+(rule (alu_rr_imm_shift op ty src imm)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.AluRRImmShift op dst src imm))))
+            (_ Unit (emit (MInst.AluRRImmShift op (operand_size ty) dst src imm))))
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.AluRRR` instructions.
-(decl alu_rrr (ALUOp Reg Reg) Reg)
-(rule (alu_rrr op src1 src2)
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(rule (alu_rrr op ty src1 src2)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.AluRRR op dst src1 src2))))
+            (_ Unit (emit (MInst.AluRRR op (operand_size ty) dst src1 src2))))
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.VecRRR` instructions.
@@ -1430,33 +1418,33 @@
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.AluRRImm12` instructions.
-(decl alu_rr_imm12 (ALUOp Reg Imm12) Reg)
-(rule (alu_rr_imm12 op src imm)
+(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg)
+(rule (alu_rr_imm12 op ty src imm)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.AluRRImm12 op dst src imm))))
+            (_ Unit (emit (MInst.AluRRImm12 op (operand_size ty) dst src imm))))
         (writable_reg_to_reg dst)))

 ;; Helper for emitting `MInst.AluRRRShift` instructions.
-(decl alu_rrr_shift (ALUOp Reg Reg ShiftOpAndAmt) Reg) -(rule (alu_rrr_shift op src1 src2 shift) +(decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg) +(rule (alu_rrr_shift op ty src1 src2 shift) (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRRShift op dst src1 src2 shift)))) + (_ Unit (emit (MInst.AluRRRShift op (operand_size ty) dst src1 src2 shift)))) (writable_reg_to_reg dst))) ;; Helper for emitting `MInst.AluRRRExtend` instructions. -(decl alu_rrr_extend (ALUOp Reg Reg ExtendOp) Reg) -(rule (alu_rrr_extend op src1 src2 extend) +(decl alu_rrr_extend (ALUOp Type Reg Reg ExtendOp) Reg) +(rule (alu_rrr_extend op ty src1 src2 extend) (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRRExtend op dst src1 src2 extend)))) + (_ Unit (emit (MInst.AluRRRExtend op (operand_size ty) dst src1 src2 extend)))) (writable_reg_to_reg dst))) ;; Same as `alu_rrr_extend`, but takes an `ExtendedValue` packed "pair" instead ;; of a `Reg` and an `ExtendOp`. -(decl alu_rr_extend_reg (ALUOp Reg ExtendedValue) Reg) -(rule (alu_rr_extend_reg op src1 extended_reg) +(decl alu_rr_extend_reg (ALUOp Type Reg ExtendedValue) Reg) +(rule (alu_rr_extend_reg op ty src1 extended_reg) (let ((src2 Reg (put_extended_in_reg extended_reg)) (extend ExtendOp (get_extended_op extended_reg))) - (alu_rrr_extend op src1 src2 extend))) + (alu_rrr_extend op ty src1 src2 extend))) ;; Helper for emitting `MInst.AluRRRR` instructions. (decl alu_rrrr (ALUOp3 Reg Reg Reg) Reg) @@ -1473,36 +1461,36 @@ (writable_reg_to_reg dst))) ;; Helper for emitting `adds` instructions. -(decl add64_with_flags (Reg Reg) ProducesFlags) -(rule (add64_with_flags src1 src2) +(decl add_with_flags (Type Reg Reg) ProducesFlags) +(rule (add_with_flags ty src1 src2) (let ((dst WritableReg (temp_writable_reg $I64))) - (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.AddS64) dst src1 src2) + (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.AddS) (operand_size ty) dst src1 src2) (writable_reg_to_reg dst)))) ;; Helper for emitting `adc` instructions. -(decl adc64 (Reg Reg) ConsumesFlags) -(rule (adc64 src1 src2) +(decl adc (Type Reg Reg) ConsumesFlags) +(rule (adc ty src1 src2) (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Adc64) dst src1 src2) + (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Adc) (operand_size ty) dst src1 src2) (writable_reg_to_reg dst)))) ;; Helper for emitting `subs` instructions. -(decl sub64_with_flags (Reg Reg) ProducesFlags) -(rule (sub64_with_flags src1 src2) +(decl sub_with_flags (Type Reg Reg) ProducesFlags) +(rule (sub_with_flags ty src1 src2) (let ((dst WritableReg (temp_writable_reg $I64))) - (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.SubS64) dst src1 src2) + (ProducesFlags.ProducesFlags (MInst.AluRRR (ALUOp.SubS) (operand_size ty) dst src1 src2) (writable_reg_to_reg dst)))) (decl cmp64_imm (Reg Imm12) ProducesFlags) (rule (cmp64_imm src1 src2) - (ProducesFlags.ProducesFlags (MInst.AluRRImm12 (ALUOp.SubS64) (writable_zero_reg) src1 src2) + (ProducesFlags.ProducesFlags (MInst.AluRRImm12 (ALUOp.SubS) (OperandSize.Size64) (writable_zero_reg) src1 src2) (zero_reg))) ;; Helper for emitting `sbc` instructions. 
-(decl sbc64 (Reg Reg) ConsumesFlags) -(rule (sbc64 src1 src2) +(decl sbc (Type Reg Reg) ConsumesFlags) +(rule (sbc ty src1 src2) (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Sbc64) dst src1 src2) + (ConsumesFlags.ConsumesFlags (MInst.AluRRR (ALUOp.Sbc) (operand_size ty) dst src1 src2) (writable_reg_to_reg dst)))) ;; Helper for emitting `MInst.VecMisc` instructions. @@ -1591,9 +1579,10 @@ ;; ;; Produces a `ProducesFlags` rather than a register or emitted instruction ;; which must be paired with `with_flags*` helpers. -(decl tst64_imm (Reg ImmLogic) ProducesFlags) -(rule (tst64_imm reg imm) - (ProducesFlags.ProducesFlags (MInst.AluRRImmLogic (ALUOp.AndS64) +(decl tst_imm (Type Reg ImmLogic) ProducesFlags) +(rule (tst_imm ty reg imm) + (ProducesFlags.ProducesFlags (MInst.AluRRImmLogic (ALUOp.AndS) + (operand_size ty) (writable_zero_reg) reg imm) @@ -1613,44 +1602,16 @@ ;; Helpers for generating `add` instructions. (decl add (Type Reg Reg) Reg) -(rule (add (fits_in_32 _ty) x y) (add32 x y)) -(rule (add $I64 x y) (add64 x y)) - -(decl add32 (Reg Reg) Reg) -(rule (add32 x y) (alu_rrr (ALUOp.Add32) x y)) - -(decl add64 (Reg Reg) Reg) -(rule (add64 x y) (alu_rrr (ALUOp.Add64) x y)) +(rule (add ty x y) (alu_rrr (ALUOp.Add) ty x y)) (decl add_imm (Type Reg Imm12) Reg) -(rule (add_imm (fits_in_32 _ty) x y) (add32_imm x y)) -(rule (add_imm $I64 x y) (add64_imm x y)) - -(decl add32_imm (Reg Imm12) Reg) -(rule (add32_imm x y) (alu_rr_imm12 (ALUOp.Add32) x y)) - -(decl add64_imm (Reg Imm12) Reg) -(rule (add64_imm x y) (alu_rr_imm12 (ALUOp.Add64) x y)) +(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) (decl add_extend (Type Reg ExtendedValue) Reg) -(rule (add_extend (fits_in_32 _ty) x y) (add32_extend x y)) -(rule (add_extend $I64 x y) (add64_extend x y)) - -(decl add32_extend (Reg ExtendedValue) Reg) -(rule (add32_extend x y) (alu_rr_extend_reg (ALUOp.Add32) x y)) - -(decl add64_extend (Reg ExtendedValue) Reg) -(rule (add64_extend x y) (alu_rr_extend_reg (ALUOp.Add64) x y)) +(rule (add_extend ty x y) (alu_rr_extend_reg (ALUOp.Add) ty x y)) (decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (add_shift (fits_in_32 _ty) x y z) (add32_shift x y z)) -(rule (add_shift $I64 x y z) (add64_shift x y z)) - -(decl add32_shift (Reg Reg ShiftOpAndAmt) Reg) -(rule (add32_shift x y z) (alu_rrr_shift (ALUOp.Add32) x y z)) - -(decl add64_shift (Reg Reg ShiftOpAndAmt) Reg) -(rule (add64_shift x y z) (alu_rrr_shift (ALUOp.Add64) x y z)) +(rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z)) (decl add_vec (Reg Reg VectorSize) Reg) (rule (add_vec x y size) (vec_rrr (VecALUOp.Add) x y size)) @@ -1658,44 +1619,16 @@ ;; Helpers for generating `sub` instructions. 
(decl sub (Type Reg Reg) Reg) -(rule (sub (fits_in_32 _ty) x y) (sub32 x y)) -(rule (sub $I64 x y) (sub64 x y)) - -(decl sub32 (Reg Reg) Reg) -(rule (sub32 x y) (alu_rrr (ALUOp.Sub32) x y)) - -(decl sub64 (Reg Reg) Reg) -(rule (sub64 x y) (alu_rrr (ALUOp.Sub64) x y)) +(rule (sub ty x y) (alu_rrr (ALUOp.Sub) ty x y)) (decl sub_imm (Type Reg Imm12) Reg) -(rule (sub_imm (fits_in_32 _ty) x y) (sub32_imm x y)) -(rule (sub_imm $I64 x y) (sub64_imm x y)) - -(decl sub32_imm (Reg Imm12) Reg) -(rule (sub32_imm x y) (alu_rr_imm12 (ALUOp.Sub32) x y)) - -(decl sub64_imm (Reg Imm12) Reg) -(rule (sub64_imm x y) (alu_rr_imm12 (ALUOp.Sub64) x y)) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) (decl sub_extend (Type Reg ExtendedValue) Reg) -(rule (sub_extend (fits_in_32 _ty) x y) (sub32_extend x y)) -(rule (sub_extend $I64 x y) (sub64_extend x y)) - -(decl sub32_extend (Reg ExtendedValue) Reg) -(rule (sub32_extend x y) (alu_rr_extend_reg (ALUOp.Sub32) x y)) - -(decl sub64_extend (Reg ExtendedValue) Reg) -(rule (sub64_extend x y) (alu_rr_extend_reg (ALUOp.Sub64) x y)) +(rule (sub_extend ty x y) (alu_rr_extend_reg (ALUOp.Sub) ty x y)) (decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (sub_shift (fits_in_32 _ty) x y z) (sub32_shift x y z)) -(rule (sub_shift $I64 x y z) (sub64_shift x y z)) - -(decl sub32_shift (Reg Reg ShiftOpAndAmt) Reg) -(rule (sub32_shift x y z) (alu_rrr_shift (ALUOp.Sub32) x y z)) - -(decl sub64_shift (Reg Reg ShiftOpAndAmt) Reg) -(rule (sub64_shift x y z) (alu_rrr_shift (ALUOp.Sub64) x y z)) +(rule (sub_shift ty x y z) (alu_rrr_shift (ALUOp.Sub) ty x y z)) (decl sub_vec (Reg Reg VectorSize) Reg) (rule (sub_vec x y size) (vec_rrr (VecALUOp.Sub) x y size)) @@ -1734,12 +1667,12 @@ (rule (sqsub x y size) (vec_rrr (VecALUOp.Sqsub) x y size)) ;; Helper for generating `umulh` instructions. -(decl umulh (Reg Reg) Reg) -(rule (umulh x y) (alu_rrr (ALUOp.UMulH) x y)) +(decl umulh (Type Reg Reg) Reg) +(rule (umulh ty x y) (alu_rrr (ALUOp.UMulH) ty x y)) ;; Helper for generating `smulh` instructions. -(decl smulh (Reg Reg) Reg) -(rule (smulh x y) (alu_rrr (ALUOp.SMulH) x y)) +(decl smulh (Type Reg Reg) Reg) +(rule (smulh ty x y) (alu_rrr (ALUOp.SMulH) ty x y)) ;; Helper for generating `mul` instructions. (decl mul (Reg Reg VectorSize) Reg) @@ -1798,45 +1731,33 @@ (rule (umull32 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull32) x y high_half)) ;; Helper for generating `asr` instructions. -(decl asr64 (Reg Reg) Reg) -(rule (asr64 x y) (alu_rrr (ALUOp.Asr64) x y)) +(decl asr (Type Reg Reg) Reg) +(rule (asr ty x y) (alu_rrr (ALUOp.Asr) ty x y)) -(decl asr64_imm (Reg ImmShift) Reg) -(rule (asr64_imm x imm) (alu_rr_imm_shift (ALUOp.Asr64) x imm)) +(decl asr_imm (Type Reg ImmShift) Reg) +(rule (asr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Asr) ty x imm)) ;; Helper for generating `lsr` instructions. -(decl lsr32 (Reg Reg) Reg) -(rule (lsr32 x y) (alu_rrr (ALUOp.Lsr32) x y)) +(decl lsr (Type Reg Reg) Reg) +(rule (lsr ty x y) (alu_rrr (ALUOp.Lsr) ty x y)) -(decl lsr32_imm (Reg ImmShift) Reg) -(rule (lsr32_imm x imm) (alu_rr_imm_shift (ALUOp.Lsr32) x imm)) - -(decl lsr64 (Reg Reg) Reg) -(rule (lsr64 x y) (alu_rrr (ALUOp.Lsr64) x y)) - -(decl lsr64_imm (Reg ImmShift) Reg) -(rule (lsr64_imm x imm) (alu_rr_imm_shift (ALUOp.Lsr64) x imm)) +(decl lsr_imm (Type Reg ImmShift) Reg) +(rule (lsr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsr) ty x imm)) ;; Helper for generating `lsl` instructions. 
-(decl lsl32 (Reg Reg) Reg) -(rule (lsl32 x y) (alu_rrr (ALUOp.Lsl32) x y)) +(decl lsl (Type Reg Reg) Reg) +(rule (lsl ty x y) (alu_rrr (ALUOp.Lsl) ty x y)) -(decl lsl32_imm (Reg ImmShift) Reg) -(rule (lsl32_imm x imm) (alu_rr_imm_shift (ALUOp.Lsl32) x imm)) - -(decl lsl64 (Reg Reg) Reg) -(rule (lsl64 x y) (alu_rrr (ALUOp.Lsl64) x y)) - -(decl lsl64_imm (Reg ImmShift) Reg) -(rule (lsl64_imm x imm) (alu_rr_imm_shift (ALUOp.Lsl64) x imm)) +(decl lsl_imm (Type Reg ImmShift) Reg) +(rule (lsl_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsl) ty x imm)) ;; Helper for generating `udiv` instructions. -(decl udiv64 (Reg Reg) Reg) -(rule (udiv64 x y) (alu_rrr (ALUOp.UDiv64) x y)) +(decl a64_udiv (Type Reg Reg) Reg) +(rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y)) ;; Helper for generating `sdiv` instructions. -(decl sdiv64 (Reg Reg) Reg) -(rule (sdiv64 x y) (alu_rrr (ALUOp.SDiv64) x y)) +(decl a64_sdiv (Type Reg Reg) Reg) +(rule (a64_sdiv ty x y) (alu_rrr (ALUOp.SDiv) ty x y)) ;; Helper for generating `not` instructions. (decl not (Reg VectorSize) Reg) @@ -1845,46 +1766,26 @@ ;; Helpers for generating `orr_not` instructions. (decl orr_not (Type Reg Reg) Reg) -(rule (orr_not (fits_in_32 _ty) x y) (orr_not32 x y)) -(rule (orr_not $I64 x y) (orr_not64 x y)) - -(decl orr_not32 (Reg Reg) Reg) -(rule (orr_not32 x y) (alu_rrr (ALUOp.OrrNot32) x y)) - -(decl orr_not64 (Reg Reg) Reg) -(rule (orr_not64 x y) (alu_rrr (ALUOp.OrrNot64) x y)) +(rule (orr_not ty x y) (alu_rrr (ALUOp.OrrNot) ty x y)) (decl orr_not_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (orr_not_shift (fits_in_32 _ty) x y shift) (orr_not_shift32 x y shift)) -(rule (orr_not_shift $I64 x y shift) (orr_not_shift64 x y shift)) - -(decl orr_not_shift32 (Reg Reg ShiftOpAndAmt) Reg) -(rule (orr_not_shift32 x y shift) (alu_rrr_shift (ALUOp.OrrNot32) x y shift)) - -(decl orr_not_shift64 (Reg Reg ShiftOpAndAmt) Reg) -(rule (orr_not_shift64 x y shift) (alu_rrr_shift (ALUOp.OrrNot64) x y shift)) +(rule (orr_not_shift ty x y shift) (alu_rrr_shift (ALUOp.OrrNot) ty x y shift)) ;; Helpers for generating `orr` instructions. -(decl orr32 (Reg Reg) Reg) -(rule (orr32 x y) (alu_rrr (ALUOp.Orr32) x y)) +(decl orr (Type Reg Reg) Reg) +(rule (orr ty x y) (alu_rrr (ALUOp.Orr) ty x y)) -(decl orr32_imm (Reg ImmLogic) Reg) -(rule (orr32_imm x y) (alu_rr_imm_logic (ALUOp.Orr32) x y)) - -(decl orr64 (Reg Reg) Reg) -(rule (orr64 x y) (alu_rrr (ALUOp.Orr64) x y)) - -(decl orr64_imm (Reg ImmLogic) Reg) -(rule (orr64_imm x y) (alu_rr_imm_logic (ALUOp.Orr64) x y)) +(decl orr_imm (Type Reg ImmLogic) Reg) +(rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y)) (decl orr_vec (Reg Reg VectorSize) Reg) (rule (orr_vec x y size) (vec_rrr (VecALUOp.Orr) x y size)) ;; Helpers for generating `and` instructions. -(decl and32_imm (Reg ImmLogic) Reg) -(rule (and32_imm x y) (alu_rr_imm_logic (ALUOp.And32) x y)) +(decl and_imm (Type Reg ImmLogic) Reg) +(rule (and_imm ty x y) (alu_rr_imm_logic (ALUOp.And) ty x y)) (decl and_vec (Reg Reg VectorSize) Reg) (rule (and_vec x y size) (vec_rrr (VecALUOp.And) x y size)) @@ -1907,17 +1808,11 @@ ;; Helpers for generating `rotr` instructions. 
-(decl rotr32 (Reg Reg) Reg) -(rule (rotr32 x y) (alu_rrr (ALUOp.RotR32) x y)) +(decl a64_rotr (Type Reg Reg) Reg) +(rule (a64_rotr ty x y) (alu_rrr (ALUOp.RotR) ty x y)) -(decl rotr32_imm (Reg ImmShift) Reg) -(rule (rotr32_imm x y) (alu_rr_imm_shift (ALUOp.RotR32) x y)) - -(decl rotr64 (Reg Reg) Reg) -(rule (rotr64 x y) (alu_rrr (ALUOp.RotR64) x y)) - -(decl rotr64_imm (Reg ImmShift) Reg) -(rule (rotr64_imm x y) (alu_rr_imm_shift (ALUOp.RotR64) x y)) +(decl a64_rotr_imm (Type Reg ImmShift) Reg) +(rule (a64_rotr_imm ty x y) (alu_rr_imm_shift (ALUOp.RotR) ty x y)) ;; Helpers for generating `rbit` instructions. @@ -1945,11 +1840,8 @@ ;; Helpers for generating `eon` instructions. -(decl eon32 (Reg Reg) Reg) -(rule (eon32 x y) (alu_rrr (ALUOp.EorNot32) x y)) - -(decl eon64 (Reg Reg) Reg) -(rule (eon64 x y) (alu_rrr (ALUOp.EorNot64) x y)) +(decl eon (Type Reg Reg) Reg) +(rule (eon ty x y) (alu_rrr (ALUOp.EorNot) ty x y)) ;; Helpers for generating `cnt` instructions. @@ -1970,7 +1862,7 @@ ;; Weird logical-instruction immediate in ORI using zero register (rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n)) - (orr64_imm (zero_reg) n)) + (orr_imm $I64 (zero_reg) n)) (decl load_constant64_full (u64) Reg) (extern constructor load_constant64_full load_constant64_full) @@ -2033,7 +1925,7 @@ (rule (trap_if_div_overflow ty x y) (let ( ;; Check RHS is -1. - (_1 Unit (emit (MInst.AluRRImm12 (adds_op ty) (writable_zero_reg) y (u8_into_imm12 1)))) + (_1 Unit (emit (MInst.AluRRImm12 (ALUOp.AddS) (operand_size ty) (writable_zero_reg) y (u8_into_imm12 1)))) ;; Check LHS is min_value, by subtracting 1 and branching if ;; there is overflow. @@ -2047,11 +1939,6 @@ ) x)) -;; Helper to use either a 32 or 64-bit adds depending on the input type. -(decl adds_op (Type) ALUOp) -(rule (adds_op (fits_in_32 _ty)) (ALUOp.AddS32)) -(rule (adds_op $I64) (ALUOp.AddS64)) - ;; An atomic load that can be sunk into another operation. (type SinkableAtomicLoad extern (enum)) @@ -2075,36 +1962,36 @@ ;; Base case of operating on registers. (rule (alu_rs_imm_logic_commutative op ty x y) - (alu_rrr op (put_in_reg x) (put_in_reg y))) + (alu_rrr op ty (put_in_reg x) (put_in_reg y))) ;; Special cases for when one operand is a constant. 
(rule (alu_rs_imm_logic_commutative op ty x (def_inst (iconst (imm_logic_from_imm64 { + &Inst::AluRRR { + alu_op, + size, + rd, + rn, + rm, + } => { + debug_assert!(match alu_op { + ALUOp::SDiv | ALUOp::UDiv | ALUOp::SMulH | ALUOp::UMulH => + size == OperandSize::Size64, + _ => true, + }); let top11 = match alu_op { - ALUOp::Add32 => 0b00001011_000, - ALUOp::Add64 => 0b10001011_000, - ALUOp::Adc32 => 0b00011010_000, - ALUOp::Adc64 => 0b10011010_000, - ALUOp::AdcS32 => 0b00111010_000, - ALUOp::AdcS64 => 0b10111010_000, - ALUOp::Sub32 => 0b01001011_000, - ALUOp::Sub64 => 0b11001011_000, - ALUOp::Sbc32 => 0b01011010_000, - ALUOp::Sbc64 => 0b11011010_000, - ALUOp::SbcS32 => 0b01111010_000, - ALUOp::SbcS64 => 0b11111010_000, - ALUOp::Orr32 => 0b00101010_000, - ALUOp::Orr64 => 0b10101010_000, - ALUOp::And32 => 0b00001010_000, - ALUOp::And64 => 0b10001010_000, - ALUOp::AndS32 => 0b01101010_000, - ALUOp::AndS64 => 0b11101010_000, - ALUOp::Eor32 => 0b01001010_000, - ALUOp::Eor64 => 0b11001010_000, - ALUOp::OrrNot32 => 0b00101010_001, - ALUOp::OrrNot64 => 0b10101010_001, - ALUOp::AndNot32 => 0b00001010_001, - ALUOp::AndNot64 => 0b10001010_001, - ALUOp::EorNot32 => 0b01001010_001, - ALUOp::EorNot64 => 0b11001010_001, - ALUOp::AddS32 => 0b00101011_000, - ALUOp::AddS64 => 0b10101011_000, - ALUOp::SubS32 => 0b01101011_000, - ALUOp::SubS64 => 0b11101011_000, - ALUOp::SDiv64 => 0b10011010_110, - ALUOp::UDiv64 => 0b10011010_110, - ALUOp::RotR32 | ALUOp::Lsr32 | ALUOp::Asr32 | ALUOp::Lsl32 => 0b00011010_110, - ALUOp::RotR64 | ALUOp::Lsr64 | ALUOp::Asr64 | ALUOp::Lsl64 => 0b10011010_110, + ALUOp::Add => 0b00001011_000, + ALUOp::Adc => 0b00011010_000, + ALUOp::AdcS => 0b00111010_000, + ALUOp::Sub => 0b01001011_000, + ALUOp::Sbc => 0b01011010_000, + ALUOp::SbcS => 0b01111010_000, + ALUOp::Orr => 0b00101010_000, + ALUOp::And => 0b00001010_000, + ALUOp::AndS => 0b01101010_000, + ALUOp::Eor => 0b01001010_000, + ALUOp::OrrNot => 0b00101010_001, + ALUOp::AndNot => 0b00001010_001, + ALUOp::EorNot => 0b01001010_001, + ALUOp::AddS => 0b00101011_000, + ALUOp::SubS => 0b01101011_000, + ALUOp::SDiv => 0b10011010_110, + ALUOp::UDiv => 0b10011010_110, + ALUOp::RotR | ALUOp::Lsr | ALUOp::Asr | ALUOp::Lsl => 0b00011010_110, ALUOp::SMulH => 0b10011011_010, ALUOp::UMulH => 0b10011011_110, }; + let top11 = top11 | size.sf_bit() << 10; let bit15_10 = match alu_op { - ALUOp::SDiv64 => 0b000011, - ALUOp::UDiv64 => 0b000010, - ALUOp::RotR32 | ALUOp::RotR64 => 0b001011, - ALUOp::Lsr32 | ALUOp::Lsr64 => 0b001001, - ALUOp::Asr32 | ALUOp::Asr64 => 0b001010, - ALUOp::Lsl32 | ALUOp::Lsl64 => 0b001000, + ALUOp::SDiv => 0b000011, + ALUOp::UDiv => 0b000010, + ALUOp::RotR => 0b001011, + ALUOp::Lsr => 0b001001, + ALUOp::Asr => 0b001010, + ALUOp::Lsl => 0b001000, ALUOp::SMulH | ALUOp::UMulH => 0b011111, _ => 0b000000, }; @@ -755,21 +752,19 @@ impl MachInstEmit for Inst { } &Inst::AluRRImm12 { alu_op, + size, rd, rn, ref imm12, } => { let top8 = match alu_op { - ALUOp::Add32 => 0b000_10001, - ALUOp::Add64 => 0b100_10001, - ALUOp::Sub32 => 0b010_10001, - ALUOp::Sub64 => 0b110_10001, - ALUOp::AddS32 => 0b001_10001, - ALUOp::AddS64 => 0b101_10001, - ALUOp::SubS32 => 0b011_10001, - ALUOp::SubS64 => 0b111_10001, + ALUOp::Add => 0b000_10001, + ALUOp::Sub => 0b010_10001, + ALUOp::AddS => 0b001_10001, + ALUOp::SubS => 0b011_10001, _ => unimplemented!("{:?}", alu_op), }; + let top8 = top8 | size.sf_bit() << 7; sink.put4(enc_arith_rr_imm12( top8, imm12.shift_bits(), @@ -780,57 +775,53 @@ impl MachInstEmit for Inst { } &Inst::AluRRImmLogic { alu_op, + 
size, rd, rn, ref imml, } => { let (top9, inv) = match alu_op { - ALUOp::Orr32 => (0b001_100100, false), - ALUOp::Orr64 => (0b101_100100, false), - ALUOp::And32 => (0b000_100100, false), - ALUOp::And64 => (0b100_100100, false), - ALUOp::AndS32 => (0b011_100100, false), - ALUOp::AndS64 => (0b111_100100, false), - ALUOp::Eor32 => (0b010_100100, false), - ALUOp::Eor64 => (0b110_100100, false), - ALUOp::OrrNot32 => (0b001_100100, true), - ALUOp::OrrNot64 => (0b101_100100, true), - ALUOp::AndNot32 => (0b000_100100, true), - ALUOp::AndNot64 => (0b100_100100, true), - ALUOp::EorNot32 => (0b010_100100, true), - ALUOp::EorNot64 => (0b110_100100, true), + ALUOp::Orr => (0b001_100100, false), + ALUOp::And => (0b000_100100, false), + ALUOp::AndS => (0b011_100100, false), + ALUOp::Eor => (0b010_100100, false), + ALUOp::OrrNot => (0b001_100100, true), + ALUOp::AndNot => (0b000_100100, true), + ALUOp::EorNot => (0b010_100100, true), _ => unimplemented!("{:?}", alu_op), }; + let top9 = top9 | size.sf_bit() << 8; let imml = if inv { imml.invert() } else { imml.clone() }; sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd)); } &Inst::AluRRImmShift { alu_op, + size, rd, rn, ref immshift, } => { let amt = immshift.value(); let (top10, immr, imms) = match alu_op { - ALUOp::RotR32 => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)), - ALUOp::RotR64 => (0b1001001111, machreg_to_gpr(rn), u32::from(amt)), - ALUOp::Lsr32 => (0b0101001100, u32::from(amt), 0b011111), - ALUOp::Lsr64 => (0b1101001101, u32::from(amt), 0b111111), - ALUOp::Asr32 => (0b0001001100, u32::from(amt), 0b011111), - ALUOp::Asr64 => (0b1001001101, u32::from(amt), 0b111111), - ALUOp::Lsl32 => ( - 0b0101001100, - u32::from((32 - amt) % 32), - u32::from(31 - amt), - ), - ALUOp::Lsl64 => ( - 0b1101001101, - u32::from((64 - amt) % 64), - u32::from(63 - amt), - ), + ALUOp::RotR => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)), + ALUOp::Lsr => (0b0101001100, u32::from(amt), 0b011111), + ALUOp::Asr => (0b0001001100, u32::from(amt), 0b011111), + ALUOp::Lsl => { + let bits = if size.is64() { 64 } else { 32 }; + ( + 0b0101001100, + u32::from((bits - amt) % bits), + u32::from(bits - 1 - amt), + ) + } _ => unimplemented!("{:?}", alu_op), }; + let top10 = top10 | size.sf_bit() << 9 | size.sf_bit(); + let imms = match alu_op { + ALUOp::Lsr | ALUOp::Asr => imms | size.sf_bit() << 5, + _ => imms, + }; sink.put4( (top10 << 22) | (immr << 16) @@ -842,36 +833,27 @@ impl MachInstEmit for Inst { &Inst::AluRRRShift { alu_op, + size, rd, rn, rm, ref shiftop, } => { let top11: u32 = match alu_op { - ALUOp::Add32 => 0b000_01011000, - ALUOp::Add64 => 0b100_01011000, - ALUOp::AddS32 => 0b001_01011000, - ALUOp::AddS64 => 0b101_01011000, - ALUOp::Sub32 => 0b010_01011000, - ALUOp::Sub64 => 0b110_01011000, - ALUOp::SubS32 => 0b011_01011000, - ALUOp::SubS64 => 0b111_01011000, - ALUOp::Orr32 => 0b001_01010000, - ALUOp::Orr64 => 0b101_01010000, - ALUOp::And32 => 0b000_01010000, - ALUOp::And64 => 0b100_01010000, - ALUOp::AndS32 => 0b011_01010000, - ALUOp::AndS64 => 0b111_01010000, - ALUOp::Eor32 => 0b010_01010000, - ALUOp::Eor64 => 0b110_01010000, - ALUOp::OrrNot32 => 0b001_01010001, - ALUOp::OrrNot64 => 0b101_01010001, - ALUOp::EorNot32 => 0b010_01010001, - ALUOp::EorNot64 => 0b110_01010001, - ALUOp::AndNot32 => 0b000_01010001, - ALUOp::AndNot64 => 0b100_01010001, + ALUOp::Add => 0b000_01011000, + ALUOp::AddS => 0b001_01011000, + ALUOp::Sub => 0b010_01011000, + ALUOp::SubS => 0b011_01011000, + ALUOp::Orr => 0b001_01010000, + ALUOp::And => 0b000_01010000, + ALUOp::AndS 
=> 0b011_01010000, + ALUOp::Eor => 0b010_01010000, + ALUOp::OrrNot => 0b001_01010001, + ALUOp::EorNot => 0b010_01010001, + ALUOp::AndNot => 0b000_01010001, _ => unimplemented!("{:?}", alu_op), }; + let top11 = top11 | size.sf_bit() << 10; let top11 = top11 | (u32::from(shiftop.op().bits()) << 1); let bits_15_10 = u32::from(shiftop.amt().value()); sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); @@ -879,22 +861,20 @@ impl MachInstEmit for Inst { &Inst::AluRRRExtend { alu_op, + size, rd, rn, rm, extendop, } => { let top11: u32 = match alu_op { - ALUOp::Add32 => 0b00001011001, - ALUOp::Add64 => 0b10001011001, - ALUOp::Sub32 => 0b01001011001, - ALUOp::Sub64 => 0b11001011001, - ALUOp::AddS32 => 0b00101011001, - ALUOp::AddS64 => 0b10101011001, - ALUOp::SubS32 => 0b01101011001, - ALUOp::SubS64 => 0b11101011001, + ALUOp::Add => 0b00001011001, + ALUOp::Sub => 0b01001011001, + ALUOp::AddS => 0b00101011001, + ALUOp::SubS => 0b01101011001, _ => unimplemented!("{:?}", alu_op), }; + let top11 = top11 | size.sf_bit() << 10; let bits_15_10 = u32::from(extendop.bits()) << 3; sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); } @@ -1394,7 +1374,8 @@ impl MachInstEmit for Inst { // mvn x28, x28 Inst::AluRRR { - alu_op: ALUOp::And64, + alu_op: ALUOp::And, + size: OperandSize::Size64, rd: x28wr, rn: x27, rm: x26, @@ -1402,7 +1383,8 @@ impl MachInstEmit for Inst { .emit(sink, emit_info, state); Inst::AluRRR { - alu_op: ALUOp::OrrNot64, + alu_op: ALUOp::OrrNot, + size: OperandSize::Size64, rd: x28wr, rn: xzr, rm: x28, @@ -1425,11 +1407,8 @@ impl MachInstEmit for Inst { }; Inst::AluRRR { - alu_op: if ty == I64 { - ALUOp::SubS64 - } else { - ALUOp::SubS32 - }, + alu_op: ALUOp::SubS, + size: OperandSize::from_ty(ty), rd: writable_zero_reg(), rn: x27, rm: x26, @@ -1447,11 +1426,11 @@ impl MachInstEmit for Inst { _ => { // add/sub/and/orr/eor x28, x27, x26 let alu_op = match op { - AtomicRmwOp::Add => ALUOp::Add64, - AtomicRmwOp::Sub => ALUOp::Sub64, - AtomicRmwOp::And => ALUOp::And64, - AtomicRmwOp::Or => ALUOp::Orr64, - AtomicRmwOp::Xor => ALUOp::Eor64, + AtomicRmwOp::Add => ALUOp::Add, + AtomicRmwOp::Sub => ALUOp::Sub, + AtomicRmwOp::And => ALUOp::And, + AtomicRmwOp::Or => ALUOp::Orr, + AtomicRmwOp::Xor => ALUOp::Eor, AtomicRmwOp::Nand | AtomicRmwOp::Umin | AtomicRmwOp::Umax @@ -1462,6 +1441,7 @@ impl MachInstEmit for Inst { Inst::AluRRR { alu_op, + size: OperandSize::Size64, rd: x28wr, rn: x27, rm: x26, @@ -2478,7 +2458,8 @@ impl MachInstEmit for Inst { // than AND on smaller cores. 
let imml = ImmLogic::maybe_from_u64(1, I32).unwrap(); Inst::AluRRImmLogic { - alu_op: ALUOp::And32, + alu_op: ALUOp::And, + size: OperandSize::Size32, rd, rn, imml, @@ -2655,7 +2636,8 @@ impl MachInstEmit for Inst { inst.emit(sink, emit_info, state); // Add base of jump table to jump-table-sourced block offset let inst = Inst::AluRRR { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: rtmp1, rn: rtmp1.to_reg(), rm: rtmp2.to_reg(), @@ -2731,15 +2713,12 @@ impl MachInstEmit for Inst { } else { offset as u64 }; - let alu_op = if offset < 0 { - ALUOp::Sub64 - } else { - ALUOp::Add64 - }; + let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add }; if let Some((idx, extendop)) = index_reg { let add = Inst::AluRRRExtend { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd, rn: reg, rm: idx, @@ -2756,6 +2735,7 @@ impl MachInstEmit for Inst { } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { let add = Inst::AluRRImm12 { alu_op, + size: OperandSize::Size64, rd, rn: reg, imm12, @@ -2775,6 +2755,7 @@ impl MachInstEmit for Inst { } let add = Inst::AluRRR { alu_op, + size: OperandSize::Size64, rd, rn: reg, rm: tmp.to_reg(), diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs index 31884dc930..5e3902dd2a 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs @@ -32,7 +32,8 @@ fn test_aarch64_binemit() { insns.push((Inst::Nop4, "1F2003D5", "nop")); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -42,7 +43,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -52,7 +54,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Adc32, + alu_op: ALUOp::Adc, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -62,7 +65,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Adc64, + alu_op: ALUOp::Adc, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -72,7 +76,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AdcS32, + alu_op: ALUOp::AdcS, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -82,7 +87,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AdcS64, + alu_op: ALUOp::AdcS, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -92,7 +98,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Sub32, + alu_op: ALUOp::Sub, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -102,7 +109,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Sub64, + alu_op: ALUOp::Sub, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -112,7 +120,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Sbc32, + alu_op: ALUOp::Sbc, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -122,7 +131,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Sbc64, + alu_op: ALUOp::Sbc, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -132,7 
+142,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SbcS32, + alu_op: ALUOp::SbcS, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -142,7 +153,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SbcS64, + alu_op: ALUOp::SbcS, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -153,7 +165,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRR { - alu_op: ALUOp::Orr32, + alu_op: ALUOp::Orr, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -163,7 +176,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -173,7 +187,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::And32, + alu_op: ALUOp::And, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -183,7 +198,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::And64, + alu_op: ALUOp::And, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -193,7 +209,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AndS32, + alu_op: ALUOp::AndS, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -203,7 +220,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AndS64, + alu_op: ALUOp::AndS, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -213,7 +231,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_zero_reg(), rn: xreg(2), rm: xreg(3), @@ -224,7 +243,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -234,7 +254,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -244,7 +265,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AddS32, + alu_op: ALUOp::AddS, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -254,7 +276,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AddS64, + alu_op: ALUOp::AddS, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -264,7 +287,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::AddS64, + alu_op: ALUOp::AddS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: xreg(5), imm12: Imm12::maybe_from_u64(1).unwrap(), @@ -275,7 +299,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::SDiv64, + alu_op: ALUOp::SDiv, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -285,7 +310,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::UDiv64, + alu_op: ALUOp::UDiv, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -296,7 +322,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRR { - alu_op: ALUOp::Eor32, + alu_op: ALUOp::Eor, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -306,7 +333,8 @@ fn test_aarch64_binemit() { )); 
insns.push(( Inst::AluRRR { - alu_op: ALUOp::Eor64, + alu_op: ALUOp::Eor, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -316,7 +344,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AndNot32, + alu_op: ALUOp::AndNot, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -326,7 +355,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::AndNot64, + alu_op: ALUOp::AndNot, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -336,7 +366,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::OrrNot32, + alu_op: ALUOp::OrrNot, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -346,7 +377,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::OrrNot64, + alu_op: ALUOp::OrrNot, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -356,7 +388,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::EorNot32, + alu_op: ALUOp::EorNot, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -366,7 +399,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::EorNot64, + alu_op: ALUOp::EorNot, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -377,7 +411,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRR { - alu_op: ALUOp::RotR32, + alu_op: ALUOp::RotR, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -387,7 +422,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::RotR64, + alu_op: ALUOp::RotR, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -397,7 +433,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Lsr32, + alu_op: ALUOp::Lsr, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -407,7 +444,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Lsr64, + alu_op: ALUOp::Lsr, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -417,7 +455,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Asr32, + alu_op: ALUOp::Asr, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -427,7 +466,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Asr64, + alu_op: ALUOp::Asr, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -437,7 +477,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Lsl32, + alu_op: ALUOp::Lsl, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -447,7 +488,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRR { - alu_op: ALUOp::Lsl64, + alu_op: ALUOp::Lsl, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), rm: xreg(6), @@ -458,7 +500,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -471,7 +514,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -484,7 +528,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::Add64, + 
alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -497,7 +542,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::Sub32, + alu_op: ALUOp::Sub, + size: OperandSize::Size32, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -510,7 +556,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::Sub64, + alu_op: ALUOp::Sub, + size: OperandSize::Size64, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -523,7 +570,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -536,7 +584,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImm12 { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_xreg(7), rn: xreg(8), imm12: Imm12 { @@ -550,7 +599,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRExtend { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: writable_xreg(7), rn: xreg(8), rm: xreg(9), @@ -562,7 +612,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRExtend { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: writable_xreg(15), rn: xreg(16), rm: xreg(17), @@ -574,7 +625,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRExtend { - alu_op: ALUOp::Sub32, + alu_op: ALUOp::Sub, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -586,7 +638,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRExtend { - alu_op: ALUOp::Sub64, + alu_op: ALUOp::Sub, + size: OperandSize::Size64, rd: writable_xreg(20), rn: xreg(21), rm: xreg(22), @@ -598,7 +651,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -612,7 +666,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -626,7 +681,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Sub32, + alu_op: ALUOp::Sub, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -640,7 +696,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Sub64, + alu_op: ALUOp::Sub, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -654,7 +711,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Orr32, + alu_op: ALUOp::Orr, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -668,7 +726,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -682,7 +741,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::And32, + alu_op: ALUOp::And, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -696,7 +756,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::And64, + alu_op: ALUOp::And, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -710,7 +771,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: 
ALUOp::AndS32, + alu_op: ALUOp::AndS, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -724,7 +786,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::AndS64, + alu_op: ALUOp::AndS, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -738,7 +801,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Eor32, + alu_op: ALUOp::Eor, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -752,7 +816,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::Eor64, + alu_op: ALUOp::Eor, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -766,7 +831,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::OrrNot32, + alu_op: ALUOp::OrrNot, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -780,7 +846,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::OrrNot64, + alu_op: ALUOp::OrrNot, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -794,7 +861,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::AndNot32, + alu_op: ALUOp::AndNot, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -808,7 +876,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::AndNot64, + alu_op: ALUOp::AndNot, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -822,7 +891,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::EorNot32, + alu_op: ALUOp::EorNot, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -836,7 +906,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::EorNot64, + alu_op: ALUOp::EorNot, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -850,7 +921,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::AddS32, + alu_op: ALUOp::AddS, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -864,7 +936,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::AddS64, + alu_op: ALUOp::AddS, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -878,7 +951,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -892,7 +966,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRRShift { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), rm: xreg(12), @@ -907,7 +982,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRRExtend { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: stack_reg(), rm: xreg(12), @@ -964,6 +1040,7 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRR { alu_op: ALUOp::SMulH, + size: OperandSize::Size64, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -974,6 +1051,7 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRR { alu_op: ALUOp::UMulH, + size: OperandSize::Size64, rd: writable_xreg(1), rn: xreg(2), rm: xreg(3), @@ -984,7 +1062,8 @@ fn test_aarch64_binemit() { insns.push(( 
Inst::AluRRImmShift { - alu_op: ALUOp::RotR32, + alu_op: ALUOp::RotR, + size: OperandSize::Size32, rd: writable_xreg(20), rn: xreg(21), immshift: ImmShift::maybe_from_u64(19).unwrap(), @@ -994,7 +1073,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::RotR64, + alu_op: ALUOp::RotR, + size: OperandSize::Size64, rd: writable_xreg(20), rn: xreg(21), immshift: ImmShift::maybe_from_u64(42).unwrap(), @@ -1004,7 +1084,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsr32, + alu_op: ALUOp::Lsr, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), immshift: ImmShift::maybe_from_u64(13).unwrap(), @@ -1014,7 +1095,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsr64, + alu_op: ALUOp::Lsr, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), immshift: ImmShift::maybe_from_u64(57).unwrap(), @@ -1024,7 +1106,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Asr32, + alu_op: ALUOp::Asr, + size: OperandSize::Size32, rd: writable_xreg(4), rn: xreg(5), immshift: ImmShift::maybe_from_u64(7).unwrap(), @@ -1034,7 +1117,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Asr64, + alu_op: ALUOp::Asr, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), immshift: ImmShift::maybe_from_u64(35).unwrap(), @@ -1044,7 +1128,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsl32, + alu_op: ALUOp::Lsl, + size: OperandSize::Size32, rd: writable_xreg(8), rn: xreg(9), immshift: ImmShift::maybe_from_u64(24).unwrap(), @@ -1054,7 +1139,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsl64, + alu_op: ALUOp::Lsl, + size: OperandSize::Size64, rd: writable_xreg(8), rn: xreg(9), immshift: ImmShift::maybe_from_u64(63).unwrap(), @@ -1064,7 +1150,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsl32, + alu_op: ALUOp::Lsl, + size: OperandSize::Size32, rd: writable_xreg(10), rn: xreg(11), immshift: ImmShift::maybe_from_u64(0).unwrap(), @@ -1074,7 +1161,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmShift { - alu_op: ALUOp::Lsl64, + alu_op: ALUOp::Lsl, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(11), immshift: ImmShift::maybe_from_u64(0).unwrap(), @@ -1085,7 +1173,8 @@ fn test_aarch64_binemit() { insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::And32, + alu_op: ALUOp::And, + size: OperandSize::Size32, rd: writable_xreg(21), rn: xreg(27), imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(), @@ -1095,7 +1184,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::And64, + alu_op: ALUOp::And, + size: OperandSize::Size64, rd: writable_xreg(7), rn: xreg(6), imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(), @@ -1105,7 +1195,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::AndS32, + alu_op: ALUOp::AndS, + size: OperandSize::Size32, rd: writable_xreg(21), rn: xreg(27), imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(), @@ -1115,7 +1206,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::AndS64, + alu_op: ALUOp::AndS, + size: OperandSize::Size64, rd: writable_xreg(7), rn: xreg(6), imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(), @@ -1125,7 +1217,8 @@ fn test_aarch64_binemit() { )); insns.push(( 
Inst::AluRRImmLogic { - alu_op: ALUOp::Orr32, + alu_op: ALUOp::Orr, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(5), imml: ImmLogic::maybe_from_u64(0x100000, I32).unwrap(), @@ -1135,7 +1228,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd: writable_xreg(4), rn: xreg(5), imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(), @@ -1145,7 +1239,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::Eor32, + alu_op: ALUOp::Eor, + size: OperandSize::Size32, rd: writable_xreg(1), rn: xreg(5), imml: ImmLogic::maybe_from_u64(0x00007fff, I32).unwrap(), @@ -1155,7 +1250,8 @@ fn test_aarch64_binemit() { )); insns.push(( Inst::AluRRImmLogic { - alu_op: ALUOp::Eor64, + alu_op: ALUOp::Eor, + size: OperandSize::Size64, rd: writable_xreg(10), rn: xreg(8), imml: ImmLogic::maybe_from_u64(0x8181818181818181, I64).unwrap(), diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 27a3445016..83fa3ec870 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -168,7 +168,8 @@ impl Inst { } else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) { // Weird logical-instruction immediate in ORI using zero register smallvec![Inst::AluRRImmLogic { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd, rn: zero_reg(), imml, @@ -2097,58 +2098,45 @@ impl PrettyPrint for Inst { impl Inst { fn print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String { - fn op_name_size(alu_op: ALUOp) -> (&'static str, OperandSize) { + fn op_name(alu_op: ALUOp) -> &'static str { match alu_op { - ALUOp::Add32 => ("add", OperandSize::Size32), - ALUOp::Add64 => ("add", OperandSize::Size64), - ALUOp::Sub32 => ("sub", OperandSize::Size32), - ALUOp::Sub64 => ("sub", OperandSize::Size64), - ALUOp::Orr32 => ("orr", OperandSize::Size32), - ALUOp::Orr64 => ("orr", OperandSize::Size64), - ALUOp::And32 => ("and", OperandSize::Size32), - ALUOp::And64 => ("and", OperandSize::Size64), - ALUOp::AndS32 => ("ands", OperandSize::Size32), - ALUOp::AndS64 => ("ands", OperandSize::Size64), - ALUOp::Eor32 => ("eor", OperandSize::Size32), - ALUOp::Eor64 => ("eor", OperandSize::Size64), - ALUOp::AddS32 => ("adds", OperandSize::Size32), - ALUOp::AddS64 => ("adds", OperandSize::Size64), - ALUOp::SubS32 => ("subs", OperandSize::Size32), - ALUOp::SubS64 => ("subs", OperandSize::Size64), - ALUOp::SMulH => ("smulh", OperandSize::Size64), - ALUOp::UMulH => ("umulh", OperandSize::Size64), - ALUOp::SDiv64 => ("sdiv", OperandSize::Size64), - ALUOp::UDiv64 => ("udiv", OperandSize::Size64), - ALUOp::AndNot32 => ("bic", OperandSize::Size32), - ALUOp::AndNot64 => ("bic", OperandSize::Size64), - ALUOp::OrrNot32 => ("orn", OperandSize::Size32), - ALUOp::OrrNot64 => ("orn", OperandSize::Size64), - ALUOp::EorNot32 => ("eon", OperandSize::Size32), - ALUOp::EorNot64 => ("eon", OperandSize::Size64), - ALUOp::RotR32 => ("ror", OperandSize::Size32), - ALUOp::RotR64 => ("ror", OperandSize::Size64), - ALUOp::Lsr32 => ("lsr", OperandSize::Size32), - ALUOp::Lsr64 => ("lsr", OperandSize::Size64), - ALUOp::Asr32 => ("asr", OperandSize::Size32), - ALUOp::Asr64 => ("asr", OperandSize::Size64), - ALUOp::Lsl32 => ("lsl", OperandSize::Size32), - ALUOp::Lsl64 => ("lsl", OperandSize::Size64), - ALUOp::Adc32 => ("adc", OperandSize::Size32), - ALUOp::Adc64 => ("adc", 
OperandSize::Size64), - ALUOp::AdcS32 => ("adcs", OperandSize::Size32), - ALUOp::AdcS64 => ("adcs", OperandSize::Size64), - ALUOp::Sbc32 => ("sbc", OperandSize::Size32), - ALUOp::Sbc64 => ("sbc", OperandSize::Size64), - ALUOp::SbcS32 => ("sbcs", OperandSize::Size32), - ALUOp::SbcS64 => ("sbcs", OperandSize::Size64), + ALUOp::Add => "add", + ALUOp::Sub => "sub", + ALUOp::Orr => "orr", + ALUOp::And => "and", + ALUOp::AndS => "ands", + ALUOp::Eor => "eor", + ALUOp::AddS => "adds", + ALUOp::SubS => "subs", + ALUOp::SMulH => "smulh", + ALUOp::UMulH => "umulh", + ALUOp::SDiv => "sdiv", + ALUOp::UDiv => "udiv", + ALUOp::AndNot => "bic", + ALUOp::OrrNot => "orn", + ALUOp::EorNot => "eon", + ALUOp::RotR => "ror", + ALUOp::Lsr => "lsr", + ALUOp::Asr => "asr", + ALUOp::Lsl => "lsl", + ALUOp::Adc => "adc", + ALUOp::AdcS => "adcs", + ALUOp::Sbc => "sbc", + ALUOp::SbcS => "sbcs", } } match self { &Inst::Nop0 => "nop-zero-len".to_string(), &Inst::Nop4 => "nop".to_string(), - &Inst::AluRRR { alu_op, rd, rn, rm } => { - let (op, size) = op_name_size(alu_op); + &Inst::AluRRR { + alu_op, + size, + rd, + rn, + rm, + } => { + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size); @@ -2176,15 +2164,16 @@ impl Inst { } &Inst::AluRRImm12 { alu_op, + size, rd, rn, ref imm12, } => { - let (op, size) = op_name_size(alu_op); + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); - if imm12.bits == 0 && alu_op == ALUOp::Add64 { + if imm12.bits == 0 && alu_op == ALUOp::Add && size.is64() { // special-case MOV (used for moving into SP). format!("mov {}, {}", rd, rn) } else { @@ -2194,11 +2183,12 @@ impl Inst { } &Inst::AluRRImmLogic { alu_op, + size, rd, rn, ref imml, } => { - let (op, size) = op_name_size(alu_op); + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); let imml = imml.show_rru(mb_rru); @@ -2206,11 +2196,12 @@ impl Inst { } &Inst::AluRRImmShift { alu_op, + size, rd, rn, ref immshift, } => { - let (op, size) = op_name_size(alu_op); + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); let immshift = immshift.show_rru(mb_rru); @@ -2218,12 +2209,13 @@ impl Inst { } &Inst::AluRRRShift { alu_op, + size, rd, rn, rm, ref shiftop, } => { - let (op, size) = op_name_size(alu_op); + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size); @@ -2232,12 +2224,13 @@ impl Inst { } &Inst::AluRRRExtend { alu_op, + size, rd, rn, rm, ref extendop, } => { - let (op, size) = op_name_size(alu_op); + let op = op_name(alu_op); let rd = show_ireg_sized(rd.to_reg(), mb_rru, size); let rn = show_ireg_sized(rn, mb_rru, size); let rm = show_ireg_sized(rm, mb_rru, size); @@ -3419,15 +3412,12 @@ impl Inst { } else { offset as u64 }; - let alu_op = if offset < 0 { - ALUOp::Sub64 - } else { - ALUOp::Add64 - }; + let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add }; if let Some((idx, extendop)) = index_reg { let add = Inst::AluRRRExtend { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd, rn: reg, rm: idx, @@ -3441,6 +3431,7 @@ impl Inst { } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { let add = Inst::AluRRImm12 { alu_op, + size: OperandSize::Size64, rd, rn: 
reg, imm12, @@ -3453,6 +3444,7 @@ impl Inst { } let add = Inst::AluRRR { alu_op, + size: OperandSize::Size64, rd, rn: reg, rm: tmp.to_reg(), diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle index 6cab703753..fa7b178d4e 100644 --- a/cranelift/codegen/src/isa/aarch64/lower.isle +++ b/cranelift/codegen/src/isa/aarch64/lower.isle @@ -91,8 +91,8 @@ ;; the actual addition is `adds` followed by `adc` which comprises the ;; low/high bits of the result (with_flags - (add64_with_flags x_lo y_lo) - (adc64 x_hi y_hi)))) + (add_with_flags $I64 x_lo y_lo) + (adc $I64 x_hi y_hi)))) ;;;; Rules for `isub` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -142,8 +142,8 @@ ;; the actual subtraction is `subs` followed by `sbc` which comprises ;; the low/high bits of the result (with_flags - (sub64_with_flags x_lo y_lo) - (sbc64 x_hi y_hi)))) + (sub_with_flags $I64 x_lo y_lo) + (sbc $I64 x_hi y_hi)))) ;;;; Rules for `uadd_sat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -203,7 +203,7 @@ ;; madd dst_hi, x_lo, y_hi, dst_hi ;; madd dst_hi, x_hi, y_lo, dst_hi ;; madd dst_lo, x_lo, y_lo, zero - (dst_hi1 Reg (umulh x_lo y_lo)) + (dst_hi1 Reg (umulh $I64 x_lo y_lo)) (dst_hi2 Reg (madd64 x_lo y_hi dst_hi1)) (dst_hi Reg (madd64 x_hi y_lo dst_hi2)) (dst_lo Reg (madd64 x_lo y_lo (zero_reg))) @@ -358,28 +358,28 @@ ;;;; Rules for `smulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type $I64 (smulhi x y))) - (value_reg (smulh (put_in_reg x) (put_in_reg y)))) + (value_reg (smulh $I64 (put_in_reg x) (put_in_reg y)))) (rule (lower (has_type (fits_in_32 ty) (smulhi x y))) (let ( (x64 Reg (put_in_reg_sext64 x)) (y64 Reg (put_in_reg_sext64 y)) (mul Reg (madd64 x64 y64 (zero_reg))) - (result Reg (asr64_imm mul (imm_shift_from_u8 (ty_bits ty)))) + (result Reg (asr_imm $I64 mul (imm_shift_from_u8 (ty_bits ty)))) ) (value_reg result))) ;;;; Rules for `umulhi` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type $I64 (umulhi x y))) - (value_reg (umulh (put_in_reg x) (put_in_reg y)))) + (value_reg (umulh $I64 (put_in_reg x) (put_in_reg y)))) (rule (lower (has_type (fits_in_32 ty) (umulhi x y))) (let ( (x64 Reg (put_in_reg_zext64 x)) (y64 Reg (put_in_reg_zext64 y)) (mul Reg (madd64 x64 y64 (zero_reg))) - (result Reg (lsr64_imm mul (imm_shift_from_u8 (ty_bits ty)))) + (result Reg (lsr_imm $I64 mul (imm_shift_from_u8 (ty_bits ty)))) ) (value_reg result))) @@ -391,7 +391,7 @@ ;; Note that aarch64's `udiv` doesn't trap so to respect the semantics of ;; CLIF's `udiv` the check for zero needs to be manually performed. (rule (lower (has_type (fits_in_64 ty) (udiv x y))) - (value_reg (udiv64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y)))) + (value_reg (a64_udiv $I64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y)))) ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. (decl put_nonzero_in_reg_zext64 (Value) Reg) @@ -428,7 +428,7 @@ (x64 Reg (put_in_reg_sext64 x)) (y64 Reg (put_nonzero_in_reg_sext64 y)) (valid_x64 Reg (trap_if_div_overflow ty x64 y64)) - (result Reg (sdiv64 valid_x64 y64)) + (result Reg (a64_sdiv $I64 valid_x64 y64)) ) (value_reg result))) @@ -439,7 +439,7 @@ ;; Special case for `sdiv` where no checks are needed due to division by a ;; constant meaning the checks are always passed. 
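// A standalone sketch (not Cranelift code) of what the i128 `iadd` rule above
// computes: `adds` adds the low halves and sets the carry, `adc` adds the high
// halves plus that carry.
fn add128(lo_a: u64, hi_a: u64, lo_b: u64, hi_b: u64) -> (u64, u64) {
    // `adds`: low half plus low half, carry out through the flags.
    let (lo, carry) = lo_a.overflowing_add(lo_b);
    // `adc`: high half plus high half plus the carry from the low half.
    let hi = hi_a.wrapping_add(hi_b).wrapping_add(carry as u64);
    (lo, hi)
}

fn main() {
    let (lo, hi) = add128(u64::MAX, 1, 1, 2);
    let a = (1u128 << 64) | u64::MAX as u128;
    let b = (2u128 << 64) | 1u128;
    assert_eq!(((hi as u128) << 64) | lo as u128, a.wrapping_add(b));
}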
(rule (lower (has_type (fits_in_64 ty) (sdiv x (def_inst (iconst (safe_divisor_from_imm64 y)))))) - (value_reg (sdiv64 (put_in_reg_sext64 x) (imm ty y)))) + (value_reg (a64_sdiv $I64 (put_in_reg_sext64 x) (imm ty y)))) ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. (decl put_nonzero_in_reg_sext64 (Value) Reg) @@ -470,7 +470,7 @@ (let ( (x64 Reg (put_in_reg_zext64 x)) (y64 Reg (put_nonzero_in_reg_zext64 y)) - (div Reg (udiv64 x64 y64)) + (div Reg (a64_udiv $I64 x64 y64)) (result Reg (msub64 div y64 x64)) ) (value_reg result))) @@ -479,7 +479,7 @@ (let ( (x64 Reg (put_in_reg_sext64 x)) (y64 Reg (put_nonzero_in_reg_sext64 y)) - (div Reg (sdiv64 x64 y64)) + (div Reg (a64_sdiv $I64 x64 y64)) (result Reg (msub64 div y64 x64)) ) (value_reg result))) @@ -537,7 +537,7 @@ (rule (lower (has_type $I128 (sextend x))) (let ( (lo Reg (put_in_reg_sext64 x)) - (hi Reg (asr64_imm lo (imm_shift_from_u8 63))) + (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63))) ) (value_regs lo hi))) @@ -554,7 +554,7 @@ lane (vector_size in) (size_from_ty $I64))) - (hi Reg (asr64_imm lo (imm_shift_from_u8 63))) + (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63))) ) (value_regs lo hi))) @@ -566,7 +566,7 @@ (lo Reg (mov_from_vec (put_in_reg vec) lane (VectorSize.Size64x2))) - (hi Reg (asr64_imm lo (imm_shift_from_u8 63))) + (hi Reg (asr_imm $I64 lo (imm_shift_from_u8 63))) ) (value_regs lo hi))) @@ -592,8 +592,8 @@ (x_regs ValueRegs (put_in_regs x)) (x_lo Reg (value_regs_get x_regs 0)) (x_hi Reg (value_regs_get x_regs 1)) - (new_lo Reg (orr_not64 (zero_reg) x_lo)) - (new_hi Reg (orr_not64 (zero_reg) x_hi)) + (new_lo Reg (orr_not $I64 (zero_reg) x_lo)) + (new_hi Reg (orr_not $I64 (zero_reg) x_hi)) ) (value_regs new_lo new_hi))) @@ -604,12 +604,12 @@ ;;;; Rules for `band` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (band x y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.And32) ty x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.And) ty x y))) (rule (lower (has_type $I64 (band x y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.And64) $I64 x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.And) $I64 x y))) -(rule (lower (has_type $I128 (band x y))) (i128_alu_bitop (ALUOp.And64) x y)) +(rule (lower (has_type $I128 (band x y))) (i128_alu_bitop (ALUOp.And) $I64 x y)) (rule (lower (has_type (vec128 ty) (band x y))) (value_reg (and_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) @@ -617,12 +617,12 @@ ;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (bor x y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr32) ty x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr) ty x y))) (rule (lower (has_type $I64 (bor x y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr64) $I64 x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.Orr) $I64 x y))) -(rule (lower (has_type $I128 (bor x y))) (i128_alu_bitop (ALUOp.Orr64) x y)) +(rule (lower (has_type $I128 (bor x y))) (i128_alu_bitop (ALUOp.Orr) $I64 x y)) (rule (lower (has_type (vec128 ty) (bor x y))) (value_reg (orr_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) @@ -630,12 +630,12 @@ ;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (bxor x y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor32) ty x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor) ty x y))) (rule (lower (has_type $I64 (bxor x 
y))) - (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor64) $I64 x y))) + (value_reg (alu_rs_imm_logic_commutative (ALUOp.Eor) $I64 x y))) -(rule (lower (has_type $I128 (bxor x y))) (i128_alu_bitop (ALUOp.Eor64) x y)) +(rule (lower (has_type $I128 (bxor x y))) (i128_alu_bitop (ALUOp.Eor) $I64 x y)) (rule (lower (has_type (vec128 ty) (bxor x y))) (value_reg (eor_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) @@ -643,12 +643,12 @@ ;;;; Rules for `band_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (band_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.AndNot32) ty x y))) + (value_reg (alu_rs_imm_logic (ALUOp.AndNot) ty x y))) (rule (lower (has_type $I64 (band_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.AndNot64) $I64 x y))) + (value_reg (alu_rs_imm_logic (ALUOp.AndNot) $I64 x y))) -(rule (lower (has_type $I128 (band_not x y))) (i128_alu_bitop (ALUOp.AndNot64) x y)) +(rule (lower (has_type $I128 (band_not x y))) (i128_alu_bitop (ALUOp.AndNot) $I64 x y)) (rule (lower (has_type (vec128 ty) (band_not x y))) (value_reg (bic_vec (put_in_reg x) (put_in_reg y) (vector_size ty)))) @@ -656,32 +656,32 @@ ;;;; Rules for `bor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (bor_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.OrrNot32) ty x y))) + (value_reg (alu_rs_imm_logic (ALUOp.OrrNot) ty x y))) (rule (lower (has_type $I64 (bor_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.OrrNot64) $I64 x y))) + (value_reg (alu_rs_imm_logic (ALUOp.OrrNot) $I64 x y))) -(rule (lower (has_type $I128 (bor_not x y))) (i128_alu_bitop (ALUOp.OrrNot64) x y)) +(rule (lower (has_type $I128 (bor_not x y))) (i128_alu_bitop (ALUOp.OrrNot) $I64 x y)) ;;;; Rules for `bxor_not` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type (fits_in_32 ty) (bxor_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.EorNot32) ty x y))) + (value_reg (alu_rs_imm_logic (ALUOp.EorNot) $I32 x y))) (rule (lower (has_type $I64 (bxor_not x y))) - (value_reg (alu_rs_imm_logic (ALUOp.EorNot64) $I64 x y))) + (value_reg (alu_rs_imm_logic (ALUOp.EorNot) $I64 x y))) -(rule (lower (has_type $I128 (bxor_not x y))) (i128_alu_bitop (ALUOp.EorNot64) x y)) +(rule (lower (has_type $I128 (bxor_not x y))) (i128_alu_bitop (ALUOp.EorNot) $I64 x y)) ;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Shift for i8/i16/i32. (rule (lower (has_type (fits_in_32 ty) (ishl x y))) - (value_reg (do_shift (ALUOp.Lsl32) ty (put_in_reg x) y))) + (value_reg (do_shift (ALUOp.Lsl) ty (put_in_reg x) y))) ;; Shift for i64. (rule (lower (has_type $I64 (ishl x y))) - (value_reg (do_shift (ALUOp.Lsl64) $I64 (put_in_reg x) y))) + (value_reg (do_shift (ALUOp.Lsl) $I64 (put_in_reg x) y))) ;; Shift for i128. 
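// Before the i128 shift rule continues below, a standalone sketch of the inverted
// second-operand ops used by the bitwise rules above: `bic` is and-not, `orn` is
// or-not, `eon` is xor-not. The `bnot` lowering gets a plain NOT by using `orn`
// with the zero register as the first operand.
fn bic(x: u64, y: u64) -> u64 { x & !y } // ALUOp::AndNot
fn orn(x: u64, y: u64) -> u64 { x | !y } // ALUOp::OrrNot
fn eon(x: u64, y: u64) -> u64 { x ^ !y } // ALUOp::EorNot

fn main() {
    assert_eq!(orn(0, 0b1010), !0b1010u64); // bnot: 0 | !x == !x
    assert_eq!(bic(0b1111, 0b0101), 0b1010);
    assert_eq!(eon(0b1100, !0b1010u64), 0b0110);
}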
(rule (lower (has_type $I128 (ishl x y))) @@ -701,15 +701,15 @@ (let ( (src_lo Reg (value_regs_get src 0)) (src_hi Reg (value_regs_get src 1)) - (lo_lshift Reg (lsl64 src_lo amt)) - (hi_lshift Reg (lsl64 src_hi amt)) - (inv_amt Reg (orr_not32 (zero_reg) amt)) - (lo_rshift Reg (lsr64 (lsr64_imm src_lo (imm_shift_from_u8 1)) + (lo_lshift Reg (lsl $I64 src_lo amt)) + (hi_lshift Reg (lsl $I64 src_hi amt)) + (inv_amt Reg (orr_not $I32 (zero_reg) amt)) + (lo_rshift Reg (lsr $I64 (lsr_imm $I64 src_lo (imm_shift_from_u8 1)) inv_amt)) - (maybe_hi Reg (orr64 hi_lshift lo_rshift)) + (maybe_hi Reg (orr $I64 hi_lshift lo_rshift)) ) (with_flags_2 - (tst64_imm amt (u64_into_imm_logic $I64 64)) + (tst_imm $I64 amt (u64_into_imm_logic $I64 64)) (csel (Cond.Ne) (zero_reg) lo_lshift) (csel (Cond.Ne) lo_lshift maybe_hi)))) @@ -741,16 +741,16 @@ (rule (do_shift op (fits_in_16 ty) x y) (let ( (shift_amt Reg (value_regs_get (put_in_regs y) 0)) - (masked_shift_amt Reg (and32_imm shift_amt (shift_mask ty))) + (masked_shift_amt Reg (and_imm $I32 shift_amt (shift_mask ty))) ) - (alu_rrr op x masked_shift_amt))) + (alu_rrr op $I32 x masked_shift_amt))) (decl shift_mask (Type) ImmLogic) (extern constructor shift_mask shift_mask) ;; 32/64-bit shift base cases. -(rule (do_shift op $I32 x y) (alu_rrr op x (value_regs_get (put_in_regs y) 0))) -(rule (do_shift op $I64 x y) (alu_rrr op x (value_regs_get (put_in_regs y) 0))) +(rule (do_shift op $I32 x y) (alu_rrr op $I32 x (value_regs_get (put_in_regs y) 0))) +(rule (do_shift op $I64 x y) (alu_rrr op $I64 x (value_regs_get (put_in_regs y) 0))) ;; Special case for shifting by a constant value where the value can fit into an ;; `ImmShift`. @@ -759,17 +759,17 @@ ;; to ensure it's attempted first, otherwise the type-based filters on the ;; previous rules seem to take priority over this rule. (rule 1 (do_shift op ty x (def_inst (iconst (imm_shift_from_imm64 >( //============================================================================ // ALU instruction constructors. -pub(crate) fn alu_inst_imm12(op: ALUOp, rd: Writable, rn: Reg, rm: ResultRSEImm12) -> Inst { +pub(crate) fn alu_inst_imm12( + op: ALUOp, + ty: Type, + rd: Writable, + rn: Reg, + rm: ResultRSEImm12, +) -> Inst { + let size = OperandSize::from_ty(ty); match rm { ResultRSEImm12::Imm12(imm12) => Inst::AluRRImm12 { alu_op: op, + size, rd, rn, imm12, }, ResultRSEImm12::Reg(rm) => Inst::AluRRR { alu_op: op, + size, rd, rn, rm, }, ResultRSEImm12::RegShift(rm, shiftop) => Inst::AluRRRShift { alu_op: op, + size, rd, rn, rm, @@ -477,6 +487,7 @@ pub(crate) fn alu_inst_imm12(op: ALUOp, rd: Writable, rn: Reg, rm: ResultRS }, ResultRSEImm12::RegExtend(rm, extendop) => Inst::AluRRRExtend { alu_op: op, + size, rd, rn, rm, @@ -772,7 +783,8 @@ fn lower_add_addends>( reg }; ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd, rn: rd.to_reg(), rm: reg, @@ -781,7 +793,8 @@ fn lower_add_addends>( for (reg, extendop) in addends32 { assert!(reg != stack_reg()); ctx.emit(Inst::AluRRRExtend { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd, rn: rd.to_reg(), rm: reg, @@ -797,14 +810,16 @@ fn lower_add_immediate>(ctx: &mut C, dst: Writable, s // Otherwise, lower the constant first then add. 
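// `alu_inst_imm12` above now derives the width with `OperandSize::from_ty(ty)`.
// A standalone sketch of the mapping this presumably performs, mirroring the new
// ISLE `operand_size` helper (32 bits or fewer selects Size32, wider integer types
// up to 64 bits select Size64); the function body here is an illustrative
// assumption, not the real implementation.
#[derive(Debug, PartialEq)]
enum OperandSize { Size32, Size64 }

fn operand_size_from_bits(bits: u32) -> OperandSize {
    if bits <= 32 { OperandSize::Size32 } else { OperandSize::Size64 }
}

fn main() {
    assert_eq!(operand_size_from_bits(8), OperandSize::Size32);  // I8
    assert_eq!(operand_size_from_bits(16), OperandSize::Size32); // I16
    assert_eq!(operand_size_from_bits(32), OperandSize::Size32); // I32
    assert_eq!(operand_size_from_bits(64), OperandSize::Size64); // I64
}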
if let Some(imm12) = Imm12::maybe_from_u64(imm as u64) { ctx.emit(Inst::AluRRImm12 { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: dst, rn: src, imm12, }); } else if let Some(imm12) = Imm12::maybe_from_u64(imm.wrapping_neg() as u64) { ctx.emit(Inst::AluRRImm12 { - alu_op: ALUOp::Sub64, + alu_op: ALUOp::Sub, + size: OperandSize::Size64, rd: dst, rn: src, imm12, @@ -812,7 +827,8 @@ fn lower_add_immediate>(ctx: &mut C, dst: Writable, s } else { lower_constant_u64(ctx, dst, imm as u64); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Add64, + alu_op: ALUOp::Add, + size: OperandSize::Size64, rd: dst, rn: dst.to_reg(), rm: src, @@ -1250,19 +1266,22 @@ pub(crate) fn lower_icmp>( // cset dst, {eq, ne} ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Eor64, + alu_op: ALUOp::Eor, + size: OperandSize::Size64, rd: tmp1, rn: lhs.regs()[0], rm: rhs.regs()[0], }); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Eor64, + alu_op: ALUOp::Eor, + size: OperandSize::Size64, rd: tmp2, rn: lhs.regs()[1], rm: rhs.regs()[1], }); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::AddS64, + alu_op: ALUOp::AddS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: tmp1.to_reg(), rm: tmp2.to_reg(), @@ -1277,13 +1296,15 @@ pub(crate) fn lower_icmp>( // cset dst, {vs, vc} ctx.emit(Inst::AluRRR { - alu_op: ALUOp::AddS64, + alu_op: ALUOp::AddS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: lhs.regs()[0], rm: rhs.regs()[0], }); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::AdcS64, + alu_op: ALUOp::AdcS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: lhs.regs()[1], rm: rhs.regs()[1], @@ -1300,14 +1321,16 @@ pub(crate) fn lower_icmp>( let unsigned_cond = lower_condcode(condcode.unsigned()); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: lhs.regs()[0], rm: rhs.regs()[0], }); materialize_bool_result(ctx, insn, tmp1, unsigned_cond); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: lhs.regs()[1], rm: rhs.regs()[1], @@ -1345,7 +1368,8 @@ pub(crate) fn lower_icmp>( }; ctx.emit(Inst::AluRRR { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn, rm, @@ -1391,7 +1415,7 @@ pub(crate) fn lower_icmp>( ExtendOp::SXTH }; let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap(); - ctx.emit(alu_inst_imm12(ALUOp::Sub32, tmp1, rn, rm)); + ctx.emit(alu_inst_imm12(ALUOp::Sub, I32, tmp1, rn, rm)); let out_cond = match condcode { IntCC::Overflow => Cond::Ne, @@ -1407,8 +1431,7 @@ pub(crate) fn lower_icmp>( (cond, rn, rm) }; - let alu_op = choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64); - ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, rm)); + ctx.emit(alu_inst_imm12(ALUOp::SubS, ty, writable_zero_reg(), rn, rm)); cond }; diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest index 4881ee55bb..188fd75ac5 100644 --- a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest +++ b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest @@ -1,4 +1,4 @@ src/clif.isle 9ea75a6f790b5c03 src/prelude.isle 73285cd431346d53 -src/isa/aarch64/inst.isle dafd813ba278ce19 -src/isa/aarch64/lower.isle 2d2e1e076a0c8a23 +src/isa/aarch64/inst.isle 4c176462894836e5 +src/isa/aarch64/lower.isle aff657984bf30686 diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs 
b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs index df26cb8578..48f365f065 100644 --- a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs +++ b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs @@ -127,6 +127,7 @@ pub enum MInst { Nop4, AluRRR { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, rm: Reg, @@ -140,24 +141,28 @@ pub enum MInst { }, AluRRImm12 { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, imm12: Imm12, }, AluRRImmLogic { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, imml: ImmLogic, }, AluRRImmShift { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, immshift: ImmShift, }, AluRRRShift { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, rm: Reg, @@ -165,6 +170,7 @@ pub enum MInst { }, AluRRRExtend { alu_op: ALUOp, + size: OperandSize, rd: WritableReg, rn: Reg, rm: Reg, @@ -688,54 +694,35 @@ pub enum MInst { }, } -/// Internal type ALUOp: defined at src/isa/aarch64/inst.isle line 789. +/// Internal type ALUOp: defined at src/isa/aarch64/inst.isle line 795. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ALUOp { - Add32, - Add64, - Sub32, - Sub64, - Orr32, - Orr64, - OrrNot32, - OrrNot64, - And32, - And64, - AndS32, - AndS64, - AndNot32, - AndNot64, - Eor32, - Eor64, - EorNot32, - EorNot64, - AddS32, - AddS64, - SubS32, - SubS64, + Add, + Sub, + Orr, + OrrNot, + And, + AndS, + AndNot, + Eor, + EorNot, + AddS, + SubS, SMulH, UMulH, - SDiv64, - UDiv64, - RotR32, - RotR64, - Lsr32, - Lsr64, - Asr32, - Asr64, - Lsl32, - Lsl64, - Adc32, - Adc64, - AdcS32, - AdcS64, - Sbc32, - Sbc64, - SbcS32, - SbcS64, + SDiv, + UDiv, + RotR, + Lsr, + Asr, + Lsl, + Adc, + AdcS, + Sbc, + SbcS, } -/// Internal type ALUOp3: defined at src/isa/aarch64/inst.isle line 850. +/// Internal type ALUOp3: defined at src/isa/aarch64/inst.isle line 833. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ALUOp3 { MAdd32, @@ -744,7 +731,7 @@ pub enum ALUOp3 { MSub64, } -/// Internal type BitOp: defined at src/isa/aarch64/inst.isle line 893. +/// Internal type BitOp: defined at src/isa/aarch64/inst.isle line 876. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum BitOp { RBit32, @@ -755,7 +742,7 @@ pub enum BitOp { Cls64, } -/// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 959. +/// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 947. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp1 { Abs32, @@ -768,7 +755,7 @@ pub enum FPUOp1 { Cvt64To32, } -/// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 972. +/// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 960. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp2 { Add32, @@ -789,14 +776,14 @@ pub enum FPUOp2 { Uqsub64, } -/// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 997. +/// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 985. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp3 { MAdd32, MAdd64, } -/// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 1004. +/// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 992. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FpuToIntOp { F32ToU32, @@ -809,7 +796,7 @@ pub enum FpuToIntOp { F64ToI64, } -/// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1017. +/// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1005. 
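// With the collapsed `ALUOp` above, the operation and its width are orthogonal; in
// the AArch64 encoding the width of these data-processing instructions is a single
// `sf` bit, so one op plus an `OperandSize` carries the same information as the old
// paired variants. A standalone sketch of how downstream code keys the register
// bank off `size` rather than the op name, mirroring `op_name` plus
// `show_ireg_sized` earlier in the patch (the `render` helper is illustrative only):
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum ALUOp { Add, Sub, SubS }
#[derive(Clone, Copy)]
enum OperandSize { Size32, Size64 }

fn render(op: ALUOp, size: OperandSize, rd: u8, rn: u8, rm: u8) -> String {
    let mnemonic = match op {
        ALUOp::Add => "add",
        ALUOp::Sub => "sub",
        ALUOp::SubS => "subs",
    };
    // The same ALUOp prints as a w-register or x-register form depending on `size`.
    let bank = match size {
        OperandSize::Size32 => 'w',
        OperandSize::Size64 => 'x',
    };
    format!("{mnemonic} {bank}{rd}, {bank}{rn}, {bank}{rm}")
}

fn main() {
    assert_eq!(render(ALUOp::Add, OperandSize::Size32, 1, 5, 2), "add w1, w5, w2");
    assert_eq!(render(ALUOp::Add, OperandSize::Size64, 1, 5, 2), "add x1, x5, x2");
}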
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum IntToFpuOp { U32ToF32, @@ -822,7 +809,7 @@ pub enum IntToFpuOp { I64ToF64, } -/// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1031. +/// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1019. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FpuRoundMode { Minus32, @@ -835,7 +822,7 @@ pub enum FpuRoundMode { Nearest64, } -/// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1044. +/// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1032. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecExtendOp { Sxtl8, @@ -846,7 +833,7 @@ pub enum VecExtendOp { Uxtl32, } -/// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1061. +/// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1049. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecALUOp { Sqadd, @@ -888,7 +875,7 @@ pub enum VecALUOp { Sqrdmulh, } -/// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1140. +/// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1128. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecMisc2 { Not, @@ -910,7 +897,7 @@ pub enum VecMisc2 { Cmeq0, } -/// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1179. +/// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1167. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecRRLongOp { Fcvtl16, @@ -920,7 +907,7 @@ pub enum VecRRLongOp { Shll32, } -/// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1194. +/// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1182. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecRRNarrowOp { Xtn16, @@ -939,7 +926,7 @@ pub enum VecRRNarrowOp { Fcvtn64, } -/// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1226. +/// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1214. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecRRRLongOp { Smull8, @@ -953,13 +940,13 @@ pub enum VecRRRLongOp { Umlal32, } -/// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1243. +/// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1231. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecPairOp { Addp, } -/// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1251. +/// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1239. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecRRPairLongOp { Saddlp8, @@ -968,14 +955,14 @@ pub enum VecRRPairLongOp { Uaddlp16, } -/// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1262. +/// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1250. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecLanesOp { Addv, Uminv, } -/// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1271. +/// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1259. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum VecShiftImmOp { Shl, @@ -983,7 +970,7 @@ pub enum VecShiftImmOp { Sshr, } -/// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1282. +/// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1270. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum AtomicRMWOp { Add, @@ -1143,34 +1130,50 @@ pub fn constructor_with_flags_2( return None; } +// Generated as internal constructor for term operand_size. +pub fn constructor_operand_size(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { + // Rule at src/isa/aarch64/inst.isle line 898. + let expr0_0 = OperandSize::Size32; + return Some(expr0_0); + } + if let Some(pattern1_0) = C::fits_in_64(ctx, pattern0_0) { + // Rule at src/isa/aarch64/inst.isle line 899. + let expr0_0 = OperandSize::Size64; + return Some(expr0_0); + } + return None; +} + // Generated as internal constructor for term vector_size. pub fn constructor_vector_size(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some((pattern1_0, pattern1_1)) = C::multi_lane(ctx, pattern0_0) { if pattern1_0 == 8 { if pattern1_1 == 16 { - // Rule at src/isa/aarch64/inst.isle line 953. + // Rule at src/isa/aarch64/inst.isle line 941. let expr0_0 = VectorSize::Size8x16; return Some(expr0_0); } } if pattern1_0 == 16 { if pattern1_1 == 8 { - // Rule at src/isa/aarch64/inst.isle line 954. + // Rule at src/isa/aarch64/inst.isle line 942. let expr0_0 = VectorSize::Size16x8; return Some(expr0_0); } } if pattern1_0 == 32 { if pattern1_1 == 4 { - // Rule at src/isa/aarch64/inst.isle line 955. + // Rule at src/isa/aarch64/inst.isle line 943. let expr0_0 = VectorSize::Size32x4; return Some(expr0_0); } } if pattern1_0 == 64 { if pattern1_1 == 2 { - // Rule at src/isa/aarch64/inst.isle line 956. + // Rule at src/isa/aarch64/inst.isle line 944. let expr0_0 = VectorSize::Size64x2; return Some(expr0_0); } @@ -1187,7 +1190,7 @@ pub fn constructor_movz( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1378. + // Rule at src/isa/aarch64/inst.isle line 1366. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovZ { @@ -1208,7 +1211,7 @@ pub fn constructor_movn( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1385. + // Rule at src/isa/aarch64/inst.isle line 1373. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovN { @@ -1225,72 +1228,84 @@ pub fn constructor_movn( pub fn constructor_alu_rr_imm_logic( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, - arg2: ImmLogic, + arg1: Type, + arg2: Reg, + arg3: ImmLogic, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1392. + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1380. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRImmLogic { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRImmLogic { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - imml: pattern2_0, + rn: pattern2_0, + imml: pattern3_0, }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term alu_rr_imm_shift. 
pub fn constructor_alu_rr_imm_shift( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, - arg2: ImmShift, + arg1: Type, + arg2: Reg, + arg3: ImmShift, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1399. + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1387. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRImmShift { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRImmShift { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - immshift: pattern2_0, + rn: pattern2_0, + immshift: pattern3_0, }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term alu_rrr. pub fn constructor_alu_rrr( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, + arg1: Type, arg2: Reg, + arg3: Reg, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1406. + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1394. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRR { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRR { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - rm: pattern2_0, + rn: pattern2_0, + rm: pattern3_0, }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term vec_rrr. @@ -1305,7 +1320,7 @@ pub fn constructor_vec_rrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1413. + // Rule at src/isa/aarch64/inst.isle line 1401. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRR { @@ -1330,7 +1345,7 @@ pub fn constructor_vec_lanes( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1420. + // Rule at src/isa/aarch64/inst.isle line 1408. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecLanes { @@ -1348,7 +1363,7 @@ pub fn constructor_vec_lanes( pub fn constructor_vec_dup(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1427. + // Rule at src/isa/aarch64/inst.isle line 1415. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecDup { @@ -1365,94 +1380,109 @@ pub fn constructor_vec_dup(ctx: &mut C, arg0: Reg, arg1: &VectorSize pub fn constructor_alu_rr_imm12( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, - arg2: Imm12, + arg1: Type, + arg2: Reg, + arg3: Imm12, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1434. + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1422. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRImm12 { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRImm12 { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - imm12: pattern2_0, + rn: pattern2_0, + imm12: pattern3_0, }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term alu_rrr_shift. pub fn constructor_alu_rrr_shift( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, + arg1: Type, arg2: Reg, - arg3: ShiftOpAndAmt, + arg3: Reg, + arg4: ShiftOpAndAmt, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1441. + let pattern4_0 = arg4; + // Rule at src/isa/aarch64/inst.isle line 1429. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRRShift { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRRShift { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - rm: pattern2_0, - shiftop: pattern3_0, + rn: pattern2_0, + rm: pattern3_0, + shiftop: pattern4_0, }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term alu_rrr_extend. pub fn constructor_alu_rrr_extend( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, + arg1: Type, arg2: Reg, - arg3: &ExtendOp, + arg3: Reg, + arg4: &ExtendOp, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1448. + let pattern4_0 = arg4; + // Rule at src/isa/aarch64/inst.isle line 1436. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = MInst::AluRRRExtend { + let expr2_0 = constructor_operand_size(ctx, pattern1_0)?; + let expr3_0 = MInst::AluRRRExtend { alu_op: pattern0_0.clone(), + size: expr2_0, rd: expr1_0, - rn: pattern1_0, - rm: pattern2_0, - extendop: pattern3_0.clone(), + rn: pattern2_0, + rm: pattern3_0, + extendop: pattern4_0.clone(), }; - let expr3_0 = C::emit(ctx, &expr2_0); - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - return Some(expr4_0); + let expr4_0 = C::emit(ctx, &expr3_0); + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + return Some(expr5_0); } // Generated as internal constructor for term alu_rr_extend_reg. pub fn constructor_alu_rr_extend_reg( ctx: &mut C, arg0: &ALUOp, - arg1: Reg, - arg2: &ExtendedValue, + arg1: Type, + arg2: Reg, + arg3: &ExtendedValue, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1456. - let expr0_0 = C::put_extended_in_reg(ctx, pattern2_0); - let expr1_0 = C::get_extended_op(ctx, pattern2_0); - let expr2_0 = constructor_alu_rrr_extend(ctx, pattern0_0, pattern1_0, expr0_0, &expr1_0)?; + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1444. 
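// The register-extend ALU constructors in this region now take the `Type` as well.
// As a reminder of what the extend operand itself contributes, here is a standalone
// sketch of one `ExtendOp` (SXTH, used by the narrow-type `icmp` lowering earlier
// in the patch); it models only the operand massaging, not the instruction.
fn sxth(rm: u64) -> i64 {
    // Take the low 16 bits of the source register and sign-extend them.
    rm as u16 as i16 as i64
}

fn main() {
    assert_eq!(sxth(0xFFFF), -1);          // 0xFFFF sign-extends to -1
    assert_eq!(sxth(0x0001_7FFF), 0x7FFF); // only the low 16 bits participate
}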
+ let expr0_0 = C::put_extended_in_reg(ctx, pattern3_0); + let expr1_0 = C::get_extended_op(ctx, pattern3_0); + let expr2_0 = + constructor_alu_rrr_extend(ctx, pattern0_0, pattern1_0, pattern2_0, expr0_0, &expr1_0)?; return Some(expr2_0); } @@ -1468,7 +1498,7 @@ pub fn constructor_alu_rrrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1463. + // Rule at src/isa/aarch64/inst.isle line 1451. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRRR { @@ -1487,7 +1517,7 @@ pub fn constructor_alu_rrrr( pub fn constructor_bit_rr(ctx: &mut C, arg0: &BitOp, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1470. + // Rule at src/isa/aarch64/inst.isle line 1458. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::BitRR { @@ -1500,78 +1530,94 @@ pub fn constructor_bit_rr(ctx: &mut C, arg0: &BitOp, arg1: Reg) -> O return Some(expr4_0); } -// Generated as internal constructor for term add64_with_flags. -pub fn constructor_add64_with_flags( +// Generated as internal constructor for term add_with_flags. +pub fn constructor_add_with_flags( ctx: &mut C, - arg0: Reg, + arg0: Type, arg1: Reg, + arg2: Reg, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1477. + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1465. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = ALUOp::AddS64; - let expr3_0 = MInst::AluRRR { + let expr2_0 = ALUOp::AddS; + let expr3_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr4_0 = MInst::AluRRR { alu_op: expr2_0, + size: expr3_0, rd: expr1_0, - rn: pattern0_0, - rm: pattern1_0, + rn: pattern1_0, + rm: pattern2_0, }; - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - let expr5_0 = ProducesFlags::ProducesFlags { - inst: expr3_0, - result: expr4_0, + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + let expr6_0 = ProducesFlags::ProducesFlags { + inst: expr4_0, + result: expr5_0, }; - return Some(expr5_0); + return Some(expr6_0); } -// Generated as internal constructor for term adc64. -pub fn constructor_adc64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term adc. +pub fn constructor_adc( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1484. + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1472. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = ALUOp::Adc64; - let expr3_0 = MInst::AluRRR { + let expr2_0 = ALUOp::Adc; + let expr3_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr4_0 = MInst::AluRRR { alu_op: expr2_0, + size: expr3_0, rd: expr1_0, - rn: pattern0_0, - rm: pattern1_0, + rn: pattern1_0, + rm: pattern2_0, }; - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - let expr5_0 = ConsumesFlags::ConsumesFlags { - inst: expr3_0, - result: expr4_0, + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + let expr6_0 = ConsumesFlags::ConsumesFlags { + inst: expr4_0, + result: expr5_0, }; - return Some(expr5_0); + return Some(expr6_0); } -// Generated as internal constructor for term sub64_with_flags. -pub fn constructor_sub64_with_flags( +// Generated as internal constructor for term sub_with_flags. 
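// `add_with_flags` returns a `ProducesFlags` and `adc` a `ConsumesFlags`, so the
// ISLE `with_flags` combinator (used by the i128 `iadd`/`isub` rules earlier) can
// emit the pair adjacently, keeping the NZCV flags live between them. A standalone
// sketch of that pairing discipline, with strings standing in for instructions:
struct ProducesFlags { inst: String, result: &'static str }
struct ConsumesFlags { inst: String, result: &'static str }

// Stand-in for `with_flags`: producer first, consumer immediately after.
fn with_flags(p: ProducesFlags, c: ConsumesFlags) -> (Vec<String>, [&'static str; 2]) {
    (vec![p.inst, c.inst], [p.result, c.result])
}

fn main() {
    let adds = ProducesFlags { inst: "adds lo, a_lo, b_lo".into(), result: "lo" };
    let adc = ConsumesFlags { inst: "adc hi, a_hi, b_hi".into(), result: "hi" };
    let (insts, results) = with_flags(adds, adc);
    assert_eq!(insts, ["adds lo, a_lo, b_lo", "adc hi, a_hi, b_hi"]);
    assert_eq!(results, ["lo", "hi"]);
}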
+pub fn constructor_sub_with_flags( ctx: &mut C, - arg0: Reg, + arg0: Type, arg1: Reg, + arg2: Reg, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1491. + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1479. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = ALUOp::SubS64; - let expr3_0 = MInst::AluRRR { + let expr2_0 = ALUOp::SubS; + let expr3_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr4_0 = MInst::AluRRR { alu_op: expr2_0, + size: expr3_0, rd: expr1_0, - rn: pattern0_0, - rm: pattern1_0, + rn: pattern1_0, + rm: pattern2_0, }; - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - let expr5_0 = ProducesFlags::ProducesFlags { - inst: expr3_0, - result: expr4_0, + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + let expr6_0 = ProducesFlags::ProducesFlags { + inst: expr4_0, + result: expr5_0, }; - return Some(expr5_0); + return Some(expr6_0); } // Generated as internal constructor for term cmp64_imm. @@ -1582,45 +1628,55 @@ pub fn constructor_cmp64_imm( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1497. - let expr0_0 = ALUOp::SubS64; - let expr1_0 = C::writable_zero_reg(ctx); - let expr2_0 = MInst::AluRRImm12 { + // Rule at src/isa/aarch64/inst.isle line 1485. + let expr0_0 = ALUOp::SubS; + let expr1_0 = OperandSize::Size64; + let expr2_0 = C::writable_zero_reg(ctx); + let expr3_0 = MInst::AluRRImm12 { alu_op: expr0_0, - rd: expr1_0, + size: expr1_0, + rd: expr2_0, rn: pattern0_0, imm12: pattern1_0, }; - let expr3_0 = C::zero_reg(ctx); - let expr4_0 = ProducesFlags::ProducesFlags { - inst: expr2_0, - result: expr3_0, - }; - return Some(expr4_0); -} - -// Generated as internal constructor for term sbc64. -pub fn constructor_sbc64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1503. - let expr0_0: Type = I64; - let expr1_0 = C::temp_writable_reg(ctx, expr0_0); - let expr2_0 = ALUOp::Sbc64; - let expr3_0 = MInst::AluRRR { - alu_op: expr2_0, - rd: expr1_0, - rn: pattern0_0, - rm: pattern1_0, - }; - let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0); - let expr5_0 = ConsumesFlags::ConsumesFlags { + let expr4_0 = C::zero_reg(ctx); + let expr5_0 = ProducesFlags::ProducesFlags { inst: expr3_0, result: expr4_0, }; return Some(expr5_0); } +// Generated as internal constructor for term sbc. +pub fn constructor_sbc( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1491. + let expr0_0: Type = I64; + let expr1_0 = C::temp_writable_reg(ctx, expr0_0); + let expr2_0 = ALUOp::Sbc; + let expr3_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr4_0 = MInst::AluRRR { + alu_op: expr2_0, + size: expr3_0, + rd: expr1_0, + rn: pattern1_0, + rm: pattern2_0, + }; + let expr5_0 = C::writable_reg_to_reg(ctx, expr1_0); + let expr6_0 = ConsumesFlags::ConsumesFlags { + inst: expr4_0, + result: expr5_0, + }; + return Some(expr6_0); +} + // Generated as internal constructor for term vec_misc. pub fn constructor_vec_misc( ctx: &mut C, @@ -1631,7 +1687,7 @@ pub fn constructor_vec_misc( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1510. + // Rule at src/isa/aarch64/inst.isle line 1498. 
let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecMisc { @@ -1657,7 +1713,7 @@ pub fn constructor_vec_rrr_long( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1517. + // Rule at src/isa/aarch64/inst.isle line 1505. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRRLong { @@ -1686,7 +1742,7 @@ pub fn constructor_vec_rrrr_long( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/aarch64/inst.isle line 1527. + // Rule at src/isa/aarch64/inst.isle line 1515. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuMove128 { @@ -1716,7 +1772,7 @@ pub fn constructor_vec_rr_narrow( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1535. + // Rule at src/isa/aarch64/inst.isle line 1523. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRNarrow { @@ -1740,7 +1796,7 @@ pub fn constructor_vec_rr_long( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1542. + // Rule at src/isa/aarch64/inst.isle line 1530. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRLong { @@ -1762,7 +1818,7 @@ pub fn constructor_mov_to_fpu( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1549. + // Rule at src/isa/aarch64/inst.isle line 1537. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovToFpu { @@ -1787,7 +1843,7 @@ pub fn constructor_mov_to_vec( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1556. + // Rule at src/isa/aarch64/inst.isle line 1544. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuMove128 { @@ -1816,7 +1872,7 @@ pub fn constructor_mov_from_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1564. + // Rule at src/isa/aarch64/inst.isle line 1552. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovFromVec { @@ -1842,7 +1898,7 @@ pub fn constructor_mov_from_vec_signed( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1571. + // Rule at src/isa/aarch64/inst.isle line 1559. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovFromVecSigned { @@ -1869,7 +1925,7 @@ pub fn constructor_extend( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1578. + // Rule at src/isa/aarch64/inst.isle line 1566. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::Extend { @@ -1888,7 +1944,7 @@ pub fn constructor_extend( pub fn constructor_load_acquire(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1585. + // Rule at src/isa/aarch64/inst.isle line 1573. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadAcquire { @@ -1901,29 +1957,33 @@ pub fn constructor_load_acquire(ctx: &mut C, arg0: Type, arg1: Reg) return Some(expr4_0); } -// Generated as internal constructor for term tst64_imm. -pub fn constructor_tst64_imm( +// Generated as internal constructor for term tst_imm. +pub fn constructor_tst_imm( ctx: &mut C, - arg0: Reg, - arg1: ImmLogic, + arg0: Type, + arg1: Reg, + arg2: ImmLogic, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1595. - let expr0_0 = ALUOp::AndS64; - let expr1_0 = C::writable_zero_reg(ctx); - let expr2_0 = MInst::AluRRImmLogic { + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1583. + let expr0_0 = ALUOp::AndS; + let expr1_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr2_0 = C::writable_zero_reg(ctx); + let expr3_0 = MInst::AluRRImmLogic { alu_op: expr0_0, - rd: expr1_0, - rn: pattern0_0, - imml: pattern1_0, + size: expr1_0, + rd: expr2_0, + rn: pattern1_0, + imml: pattern2_0, }; - let expr3_0 = C::invalid_reg(ctx); - let expr4_0 = ProducesFlags::ProducesFlags { - inst: expr2_0, - result: expr3_0, + let expr4_0 = C::invalid_reg(ctx); + let expr5_0 = ProducesFlags::ProducesFlags { + inst: expr3_0, + result: expr4_0, }; - return Some(expr4_0); + return Some(expr5_0); } // Generated as internal constructor for term csel. @@ -1936,7 +1996,7 @@ pub fn constructor_csel( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1608. + // Rule at src/isa/aarch64/inst.isle line 1597. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::CSel { @@ -1955,41 +2015,12 @@ pub fn constructor_csel( // Generated as internal constructor for term add. pub fn constructor_add(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1617. - let expr0_0 = constructor_add64(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1616. - let expr0_0 = constructor_add32(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term add32. -pub fn constructor_add32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1620. - let expr0_0 = ALUOp::Add32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term add64. -pub fn constructor_add64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1623. - let expr0_0 = ALUOp::Add64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1605. 
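// `tst_imm` above is `ands` with the zero register as destination: it computes
// `rn & imm`, keeps only the flags and discards the value. The i128 shift lowering
// earlier in the patch uses `tst amt, #64` plus `csel` on `Cond.Ne` to choose
// between the below-64 and 64-and-above cases. A standalone sketch of just the
// flag question being asked (assuming the amount has already been reduced mod 128):
fn tst_z(rn: u64, imm: u64) -> bool {
    // Models the Z flag of `tst rn, #imm` (i.e. `ands xzr, rn, #imm`).
    (rn & imm) == 0
}

fn main() {
    // For an amount in 0..=127, "amount >= 64" is exactly "bit 6 set".
    assert!(tst_z(63, 64));  // Z set:   Cond::Ne is false, keep the small-shift result
    assert!(!tst_z(64, 64)); // Z clear: Cond::Ne is true, select the >=64 path
}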
+ let expr0_0 = ALUOp::Add; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2000,41 +2031,12 @@ pub fn constructor_add_imm( arg1: Reg, arg2: Imm12, ) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1627. - let expr0_0 = constructor_add64_imm(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1626. - let expr0_0 = constructor_add32_imm(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term add32_imm. -pub fn constructor_add32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1630. - let expr0_0 = ALUOp::Add32; - let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term add64_imm. -pub fn constructor_add64_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1633. - let expr0_0 = ALUOp::Add64; - let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1608. + let expr0_0 = ALUOp::Add; + let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2044,50 +2046,13 @@ pub fn constructor_add_extend( arg0: Type, arg1: Reg, arg2: &ExtendedValue, -) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1637. - let expr0_0 = constructor_add64_extend(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1636. - let expr0_0 = constructor_add32_extend(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term add32_extend. -pub fn constructor_add32_extend( - ctx: &mut C, - arg0: Reg, - arg1: &ExtendedValue, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1640. - let expr0_0 = ALUOp::Add32; - let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term add64_extend. -pub fn constructor_add64_extend( - ctx: &mut C, - arg0: Reg, - arg1: &ExtendedValue, -) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1643. - let expr0_0 = ALUOp::Add64; - let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1611. 
+ let expr0_0 = ALUOp::Add; + let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2098,56 +2063,16 @@ pub fn constructor_add_shift( arg1: Reg, arg2: Reg, arg3: ShiftOpAndAmt, -) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1647. - let expr0_0 = constructor_add64_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1646. - let expr0_0 = constructor_add32_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term add32_shift. -pub fn constructor_add32_shift( - ctx: &mut C, - arg0: Reg, - arg1: Reg, - arg2: ShiftOpAndAmt, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1650. - let expr0_0 = ALUOp::Add32; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term add64_shift. -pub fn constructor_add64_shift( - ctx: &mut C, - arg0: Reg, - arg1: Reg, - arg2: ShiftOpAndAmt, -) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1653. - let expr0_0 = ALUOp::Add64; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1614. + let expr0_0 = ALUOp::Add; + let expr1_0 = constructor_alu_rrr_shift( + ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0, pattern3_0, + )?; return Some(expr1_0); } @@ -2161,7 +2086,7 @@ pub fn constructor_add_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1656. + // Rule at src/isa/aarch64/inst.isle line 1617. let expr0_0 = VecALUOp::Add; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2169,41 +2094,12 @@ pub fn constructor_add_vec( // Generated as internal constructor for term sub. pub fn constructor_sub(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1662. - let expr0_0 = constructor_sub64(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1661. - let expr0_0 = constructor_sub32(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term sub32. -pub fn constructor_sub32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1665. - let expr0_0 = ALUOp::Sub32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term sub64. 
-pub fn constructor_sub64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1668. - let expr0_0 = ALUOp::Sub64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1622. + let expr0_0 = ALUOp::Sub; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2214,41 +2110,12 @@ pub fn constructor_sub_imm( arg1: Reg, arg2: Imm12, ) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1672. - let expr0_0 = constructor_sub64_imm(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1671. - let expr0_0 = constructor_sub32_imm(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term sub32_imm. -pub fn constructor_sub32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1675. - let expr0_0 = ALUOp::Sub32; - let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term sub64_imm. -pub fn constructor_sub64_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1678. - let expr0_0 = ALUOp::Sub64; - let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1625. + let expr0_0 = ALUOp::Sub; + let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2258,50 +2125,13 @@ pub fn constructor_sub_extend( arg0: Type, arg1: Reg, arg2: &ExtendedValue, -) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1682. - let expr0_0 = constructor_sub64_extend(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1681. - let expr0_0 = constructor_sub32_extend(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term sub32_extend. -pub fn constructor_sub32_extend( - ctx: &mut C, - arg0: Reg, - arg1: &ExtendedValue, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1685. - let expr0_0 = ALUOp::Sub32; - let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term sub64_extend. -pub fn constructor_sub64_extend( - ctx: &mut C, - arg0: Reg, - arg1: &ExtendedValue, -) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1688. 
- let expr0_0 = ALUOp::Sub64; - let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1628. + let expr0_0 = ALUOp::Sub; + let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2312,56 +2142,16 @@ pub fn constructor_sub_shift( arg1: Reg, arg2: Reg, arg3: ShiftOpAndAmt, -) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1692. - let expr0_0 = constructor_sub64_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1691. - let expr0_0 = constructor_sub32_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term sub32_shift. -pub fn constructor_sub32_shift( - ctx: &mut C, - arg0: Reg, - arg1: Reg, - arg2: ShiftOpAndAmt, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1695. - let expr0_0 = ALUOp::Sub32; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term sub64_shift. -pub fn constructor_sub64_shift( - ctx: &mut C, - arg0: Reg, - arg1: Reg, - arg2: ShiftOpAndAmt, -) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1698. - let expr0_0 = ALUOp::Sub64; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1631. + let expr0_0 = ALUOp::Sub; + let expr1_0 = constructor_alu_rrr_shift( + ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0, pattern3_0, + )?; return Some(expr1_0); } @@ -2375,7 +2165,7 @@ pub fn constructor_sub_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1701. + // Rule at src/isa/aarch64/inst.isle line 1634. let expr0_0 = VecALUOp::Sub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2394,7 +2184,7 @@ pub fn constructor_madd( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1707. + // Rule at src/isa/aarch64/inst.isle line 1640. let expr0_0 = constructor_madd64(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2402,7 +2192,7 @@ pub fn constructor_madd( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1706. + // Rule at src/isa/aarch64/inst.isle line 1639. let expr0_0 = constructor_madd32(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2414,7 +2204,7 @@ pub fn constructor_madd32(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1710. + // Rule at src/isa/aarch64/inst.isle line 1643. 
let expr0_0 = ALUOp3::MAdd32; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2425,7 +2215,7 @@ pub fn constructor_madd64(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1713. + // Rule at src/isa/aarch64/inst.isle line 1646. let expr0_0 = ALUOp3::MAdd64; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2436,7 +2226,7 @@ pub fn constructor_msub64(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1718. + // Rule at src/isa/aarch64/inst.isle line 1651. let expr0_0 = ALUOp3::MSub64; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2452,7 +2242,7 @@ pub fn constructor_uqadd( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1722. + // Rule at src/isa/aarch64/inst.isle line 1655. let expr0_0 = VecALUOp::Uqadd; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2468,7 +2258,7 @@ pub fn constructor_sqadd( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1726. + // Rule at src/isa/aarch64/inst.isle line 1659. let expr0_0 = VecALUOp::Sqadd; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2484,7 +2274,7 @@ pub fn constructor_uqsub( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1730. + // Rule at src/isa/aarch64/inst.isle line 1663. let expr0_0 = VecALUOp::Uqsub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2500,29 +2290,31 @@ pub fn constructor_sqsub( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1734. + // Rule at src/isa/aarch64/inst.isle line 1667. let expr0_0 = VecALUOp::Sqsub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } // Generated as internal constructor for term umulh. -pub fn constructor_umulh(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +pub fn constructor_umulh(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1738. + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1671. let expr0_0 = ALUOp::UMulH; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } // Generated as internal constructor for term smulh. -pub fn constructor_smulh(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +pub fn constructor_smulh(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1742. + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1675. 
let expr0_0 = ALUOp::SMulH; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2536,7 +2328,7 @@ pub fn constructor_mul( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1746. + // Rule at src/isa/aarch64/inst.isle line 1679. let expr0_0 = VecALUOp::Mul; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2546,7 +2338,7 @@ pub fn constructor_mul( pub fn constructor_neg(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1750. + // Rule at src/isa/aarch64/inst.isle line 1683. let expr0_0 = VecMisc2::Neg; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2556,7 +2348,7 @@ pub fn constructor_neg(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> pub fn constructor_rev64(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1754. + // Rule at src/isa/aarch64/inst.isle line 1687. let expr0_0 = VecMisc2::Rev64; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2566,7 +2358,7 @@ pub fn constructor_rev64(ctx: &mut C, arg0: Reg, arg1: &VectorSize) pub fn constructor_xtn64(ctx: &mut C, arg0: Reg, arg1: bool) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1758. + // Rule at src/isa/aarch64/inst.isle line 1691. let expr0_0 = VecRRNarrowOp::Xtn64; let expr1_0 = constructor_vec_rr_narrow(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2582,7 +2374,7 @@ pub fn constructor_addp( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1762. + // Rule at src/isa/aarch64/inst.isle line 1695. let expr0_0 = VecALUOp::Addp; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2592,7 +2384,7 @@ pub fn constructor_addp( pub fn constructor_addv(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1766. + // Rule at src/isa/aarch64/inst.isle line 1699. let expr0_0 = VecLanesOp::Addv; let expr1_0 = constructor_vec_lanes(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2602,7 +2394,7 @@ pub fn constructor_addv(ctx: &mut C, arg0: Reg, arg1: &VectorSize) - pub fn constructor_shll32(ctx: &mut C, arg0: Reg, arg1: bool) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1770. + // Rule at src/isa/aarch64/inst.isle line 1703. let expr0_0 = VecRRLongOp::Shll32; let expr1_0 = constructor_vec_rr_long(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2620,7 +2412,7 @@ pub fn constructor_umlal32( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1774. + // Rule at src/isa/aarch64/inst.isle line 1707. 
let expr0_0 = VecRRRLongOp::Umlal32; let expr1_0 = constructor_vec_rrrr_long( ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0, pattern3_0, @@ -2638,7 +2430,7 @@ pub fn constructor_smull8( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1778. + // Rule at src/isa/aarch64/inst.isle line 1711. let expr0_0 = VecRRRLongOp::Smull8; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2654,7 +2446,7 @@ pub fn constructor_umull8( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1782. + // Rule at src/isa/aarch64/inst.isle line 1715. let expr0_0 = VecRRRLongOp::Umull8; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2670,7 +2462,7 @@ pub fn constructor_smull16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1786. + // Rule at src/isa/aarch64/inst.isle line 1719. let expr0_0 = VecRRRLongOp::Smull16; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2686,7 +2478,7 @@ pub fn constructor_umull16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1790. + // Rule at src/isa/aarch64/inst.isle line 1723. let expr0_0 = VecRRRLongOp::Umull16; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2702,7 +2494,7 @@ pub fn constructor_smull32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1794. + // Rule at src/isa/aarch64/inst.isle line 1727. let expr0_0 = VecRRRLongOp::Smull32; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2718,129 +2510,122 @@ pub fn constructor_umull32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1798. + // Rule at src/isa/aarch64/inst.isle line 1731. let expr0_0 = VecRRRLongOp::Umull32; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term asr64. -pub fn constructor_asr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term asr. +pub fn constructor_asr(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1802. - let expr0_0 = ALUOp::Asr64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1735. + let expr0_0 = ALUOp::Asr; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term asr64_imm. -pub fn constructor_asr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { +// Generated as internal constructor for term asr_imm. +pub fn constructor_asr_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: ImmShift, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1805. 
- let expr0_0 = ALUOp::Asr64; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1738. + let expr0_0 = ALUOp::Asr; + let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsr32. -pub fn constructor_lsr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term lsr. +pub fn constructor_lsr(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1809. - let expr0_0 = ALUOp::Lsr32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1742. + let expr0_0 = ALUOp::Lsr; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsr32_imm. -pub fn constructor_lsr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { +// Generated as internal constructor for term lsr_imm. +pub fn constructor_lsr_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: ImmShift, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1812. - let expr0_0 = ALUOp::Lsr32; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1745. + let expr0_0 = ALUOp::Lsr; + let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsr64. -pub fn constructor_lsr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term lsl. +pub fn constructor_lsl(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1815. - let expr0_0 = ALUOp::Lsr64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1749. + let expr0_0 = ALUOp::Lsl; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsr64_imm. -pub fn constructor_lsr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { +// Generated as internal constructor for term lsl_imm. +pub fn constructor_lsl_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: ImmShift, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1818. - let expr0_0 = ALUOp::Lsr64; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1752. + let expr0_0 = ALUOp::Lsl; + let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsl32. -pub fn constructor_lsl32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term a64_udiv. 
+pub fn constructor_a64_udiv( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1822. - let expr0_0 = ALUOp::Lsl32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1756. + let expr0_0 = ALUOp::UDiv; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term lsl32_imm. -pub fn constructor_lsl32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { +// Generated as internal constructor for term a64_sdiv. +pub fn constructor_a64_sdiv( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1825. - let expr0_0 = ALUOp::Lsl32; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term lsl64. -pub fn constructor_lsl64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1828. - let expr0_0 = ALUOp::Lsl64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term lsl64_imm. -pub fn constructor_lsl64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1831. - let expr0_0 = ALUOp::Lsl64; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term udiv64. -pub fn constructor_udiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1835. - let expr0_0 = ALUOp::UDiv64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term sdiv64. -pub fn constructor_sdiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1839. - let expr0_0 = ALUOp::SDiv64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1760. + let expr0_0 = ALUOp::SDiv; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2848,7 +2633,7 @@ pub fn constructor_sdiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Opti pub fn constructor_not(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1843. + // Rule at src/isa/aarch64/inst.isle line 1764. let expr0_0 = VecMisc2::Not; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2861,41 +2646,12 @@ pub fn constructor_orr_not( arg1: Reg, arg2: Reg, ) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1849. 
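(Illustrative aside, not part of the patch.) The unified constructors above (asr, lsr, lsl, a64_udiv, a64_sdiv) now take a Type and leave the width choice to the shared emit helpers, which select a size from that type. A rough sketch of that type-to-size mapping, assuming only integer types of at most 64 bits reach it:

// Rough sketch, not the generated code: pick a register width from a type's
// bit count, in the spirit of the operand_size term the generated code calls.
#[derive(Debug, PartialEq)]
enum OperandSize {
    Size32,
    Size64,
}

fn operand_size(type_bits: u32) -> OperandSize {
    // Assumption: callers only pass integer widths of 64 bits or fewer.
    if type_bits <= 32 {
        OperandSize::Size32
    } else {
        OperandSize::Size64
    }
}

fn main() {
    assert_eq!(operand_size(8), OperandSize::Size32);
    assert_eq!(operand_size(16), OperandSize::Size32);
    assert_eq!(operand_size(32), OperandSize::Size32);
    assert_eq!(operand_size(64), OperandSize::Size64);
}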
- let expr0_0 = constructor_orr_not64(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1848. - let expr0_0 = constructor_orr_not32(ctx, pattern2_0, pattern3_0)?; - return Some(expr0_0); - } - return None; -} - -// Generated as internal constructor for term orr_not32. -pub fn constructor_orr_not32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1852. - let expr0_0 = ALUOp::OrrNot32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr_not64. -pub fn constructor_orr_not64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1855. - let expr0_0 = ALUOp::OrrNot64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1769. + let expr0_0 = ALUOp::OrrNot; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -2908,94 +2664,41 @@ pub fn constructor_orr_not_shift( arg3: ShiftOpAndAmt, ) -> Option { let pattern0_0 = arg0; - if pattern0_0 == I64 { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1859. - let expr0_0 = constructor_orr_not_shift64(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - let pattern2_0 = arg1; - let pattern3_0 = arg2; - let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1858. - let expr0_0 = constructor_orr_not_shift32(ctx, pattern2_0, pattern3_0, pattern4_0)?; - return Some(expr0_0); - } - return None; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1772. + let expr0_0 = ALUOp::OrrNot; + let expr1_0 = constructor_alu_rrr_shift( + ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0, pattern3_0, + )?; + return Some(expr1_0); } -// Generated as internal constructor for term orr_not_shift32. -pub fn constructor_orr_not_shift32( +// Generated as internal constructor for term orr. +pub fn constructor_orr(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1777. + let expr0_0 = ALUOp::Orr; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term orr_imm. +pub fn constructor_orr_imm( ctx: &mut C, - arg0: Reg, + arg0: Type, arg1: Reg, - arg2: ShiftOpAndAmt, + arg2: ImmLogic, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1862. - let expr0_0 = ALUOp::OrrNot32; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr_not_shift64. 
-pub fn constructor_orr_not_shift64( - ctx: &mut C, - arg0: Reg, - arg1: Reg, - arg2: ShiftOpAndAmt, -) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1865. - let expr0_0 = ALUOp::OrrNot64; - let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr32. -pub fn constructor_orr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1870. - let expr0_0 = ALUOp::Orr32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr32_imm. -pub fn constructor_orr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1873. - let expr0_0 = ALUOp::Orr32; - let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr64. -pub fn constructor_orr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1876. - let expr0_0 = ALUOp::Orr64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term orr64_imm. -pub fn constructor_orr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1879. - let expr0_0 = ALUOp::Orr64; - let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; + // Rule at src/isa/aarch64/inst.isle line 1780. + let expr0_0 = ALUOp::Orr; + let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -3009,19 +2712,25 @@ pub fn constructor_orr_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1882. + // Rule at src/isa/aarch64/inst.isle line 1783. let expr0_0 = VecALUOp::Orr; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term and32_imm. -pub fn constructor_and32_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { +// Generated as internal constructor for term and_imm. +pub fn constructor_and_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: ImmLogic, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1887. - let expr0_0 = ALUOp::And32; - let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1788. + let expr0_0 = ALUOp::And; + let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -3035,7 +2744,7 @@ pub fn constructor_and_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1890. + // Rule at src/isa/aarch64/inst.isle line 1791. 
let expr0_0 = VecALUOp::And; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3051,7 +2760,7 @@ pub fn constructor_eor_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1894. + // Rule at src/isa/aarch64/inst.isle line 1795. let expr0_0 = VecALUOp::Eor; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3067,7 +2776,7 @@ pub fn constructor_bic_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1898. + // Rule at src/isa/aarch64/inst.isle line 1799. let expr0_0 = VecALUOp::Bic; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3083,7 +2792,7 @@ pub fn constructor_sshl( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1902. + // Rule at src/isa/aarch64/inst.isle line 1803. let expr0_0 = VecALUOp::Sshl; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3099,56 +2808,48 @@ pub fn constructor_ushl( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1906. + // Rule at src/isa/aarch64/inst.isle line 1807. let expr0_0 = VecALUOp::Ushl; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term rotr32. -pub fn constructor_rotr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term a64_rotr. +pub fn constructor_a64_rotr( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1911. - let expr0_0 = ALUOp::RotR32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1812. + let expr0_0 = ALUOp::RotR; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } -// Generated as internal constructor for term rotr32_imm. -pub fn constructor_rotr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { +// Generated as internal constructor for term a64_rotr_imm. +pub fn constructor_a64_rotr_imm( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: ImmShift, +) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1914. - let expr0_0 = ALUOp::RotR32; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term rotr64. -pub fn constructor_rotr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1917. - let expr0_0 = ALUOp::RotR64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term rotr64_imm. -pub fn constructor_rotr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1920. 
- let expr0_0 = ALUOp::RotR64; - let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1815. + let expr0_0 = ALUOp::RotR; + let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } // Generated as internal constructor for term rbit32. pub fn constructor_rbit32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1925. + // Rule at src/isa/aarch64/inst.isle line 1820. let expr0_0 = BitOp::RBit32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3157,7 +2858,7 @@ pub fn constructor_rbit32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term rbit64. pub fn constructor_rbit64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1928. + // Rule at src/isa/aarch64/inst.isle line 1823. let expr0_0 = BitOp::RBit64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3166,7 +2867,7 @@ pub fn constructor_rbit64(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term clz32. pub fn constructor_clz32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1933. + // Rule at src/isa/aarch64/inst.isle line 1828. let expr0_0 = BitOp::Clz32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3175,7 +2876,7 @@ pub fn constructor_clz32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term clz64. pub fn constructor_clz64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1936. + // Rule at src/isa/aarch64/inst.isle line 1831. let expr0_0 = BitOp::Clz64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3184,7 +2885,7 @@ pub fn constructor_clz64(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term cls32. pub fn constructor_cls32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1941. + // Rule at src/isa/aarch64/inst.isle line 1836. let expr0_0 = BitOp::Cls32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3193,29 +2894,20 @@ pub fn constructor_cls32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term cls64. pub fn constructor_cls64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1944. + // Rule at src/isa/aarch64/inst.isle line 1839. let expr0_0 = BitOp::Cls64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); } -// Generated as internal constructor for term eon32. -pub fn constructor_eon32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { +// Generated as internal constructor for term eon. +pub fn constructor_eon(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1949. - let expr0_0 = ALUOp::EorNot32; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; - return Some(expr1_0); -} - -// Generated as internal constructor for term eon64. 
-pub fn constructor_eon64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { - let pattern0_0 = arg0; - let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1952. - let expr0_0 = ALUOp::EorNot64; - let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; + let pattern2_0 = arg2; + // Rule at src/isa/aarch64/inst.isle line 1844. + let expr0_0 = ALUOp::EorNot; + let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } @@ -3223,7 +2915,7 @@ pub fn constructor_eon64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_vec_cnt(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1957. + // Rule at src/isa/aarch64/inst.isle line 1849. let expr0_0 = VecMisc2::Cnt; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3240,25 +2932,26 @@ pub fn constructor_imm(ctx: &mut C, arg0: Type, arg1: u64) -> Option }; if let Some(pattern3_0) = closure3() { if let Some(pattern4_0) = C::imm_logic_from_u64(ctx, pattern2_0, pattern3_0) { - // Rule at src/isa/aarch64/inst.isle line 1972. - let expr0_0 = C::zero_reg(ctx); - let expr1_0 = constructor_orr64_imm(ctx, expr0_0, pattern4_0)?; - return Some(expr1_0); + // Rule at src/isa/aarch64/inst.isle line 1864. + let expr0_0: Type = I64; + let expr1_0 = C::zero_reg(ctx); + let expr2_0 = constructor_orr_imm(ctx, expr0_0, expr1_0, pattern4_0)?; + return Some(expr2_0); } } if let Some(pattern3_0) = C::move_wide_const_from_u64(ctx, pattern2_0) { - // Rule at src/isa/aarch64/inst.isle line 1964. + // Rule at src/isa/aarch64/inst.isle line 1856. let expr0_0 = OperandSize::Size64; let expr1_0 = constructor_movz(ctx, pattern3_0, &expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::move_wide_const_from_negated_u64(ctx, pattern2_0) { - // Rule at src/isa/aarch64/inst.isle line 1968. + // Rule at src/isa/aarch64/inst.isle line 1860. let expr0_0 = OperandSize::Size64; let expr1_0 = constructor_movn(ctx, pattern3_0, &expr0_0)?; return Some(expr1_0); } - // Rule at src/isa/aarch64/inst.isle line 1979. + // Rule at src/isa/aarch64/inst.isle line 1871. let expr0_0 = C::load_constant64_full(ctx, pattern2_0); return Some(expr0_0); } @@ -3270,17 +2963,17 @@ pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I32 { - // Rule at src/isa/aarch64/inst.isle line 1990. + // Rule at src/isa/aarch64/inst.isle line 1882. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 1991. + // Rule at src/isa/aarch64/inst.isle line 1883. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 1986. + // Rule at src/isa/aarch64/inst.isle line 1878. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = true; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3296,17 +2989,17 @@ pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I32 { - // Rule at src/isa/aarch64/inst.isle line 1999. + // Rule at src/isa/aarch64/inst.isle line 1891. 
let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2000. + // Rule at src/isa/aarch64/inst.isle line 1892. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 1995. + // Rule at src/isa/aarch64/inst.isle line 1887. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = false; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3322,12 +3015,12 @@ pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2008. + // Rule at src/isa/aarch64/inst.isle line 1900. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 2004. + // Rule at src/isa/aarch64/inst.isle line 1896. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = true; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3343,12 +3036,12 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2016. + // Rule at src/isa/aarch64/inst.isle line 1908. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 2012. + // Rule at src/isa/aarch64/inst.isle line 1904. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = false; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3362,7 +3055,7 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op // Generated as internal constructor for term trap_if_zero_divisor. pub fn constructor_trap_if_zero_divisor(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 2021. + // Rule at src/isa/aarch64/inst.isle line 1913. let expr0_0 = C::cond_br_zero(ctx, pattern0_0); let expr1_0 = C::trap_code_division_by_zero(ctx); let expr2_0 = MInst::TrapIf { @@ -3377,12 +3070,12 @@ pub fn constructor_trap_if_zero_divisor(ctx: &mut C, arg0: Reg) -> O pub fn constructor_size_from_ty(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2027. + // Rule at src/isa/aarch64/inst.isle line 1919. let expr0_0 = OperandSize::Size64; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - // Rule at src/isa/aarch64/inst.isle line 2026. + // Rule at src/isa/aarch64/inst.isle line 1918. let expr0_0 = OperandSize::Size32; return Some(expr0_0); } @@ -3399,62 +3092,48 @@ pub fn constructor_trap_if_div_overflow( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 2033. - let expr0_0 = constructor_adds_op(ctx, pattern0_0)?; - let expr1_0 = C::writable_zero_reg(ctx); - let expr2_0: u8 = 1; - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = MInst::AluRRImm12 { + // Rule at src/isa/aarch64/inst.isle line 1925. 
+ let expr0_0 = ALUOp::AddS; + let expr1_0 = constructor_operand_size(ctx, pattern0_0)?; + let expr2_0 = C::writable_zero_reg(ctx); + let expr3_0: u8 = 1; + let expr4_0 = C::u8_into_imm12(ctx, expr3_0); + let expr5_0 = MInst::AluRRImm12 { alu_op: expr0_0, - rd: expr1_0, + size: expr1_0, + rd: expr2_0, rn: pattern2_0, - imm12: expr3_0, + imm12: expr4_0, }; - let expr5_0 = C::emit(ctx, &expr4_0); - let expr6_0 = constructor_size_from_ty(ctx, pattern0_0)?; - let expr7_0: u8 = 1; - let expr8_0 = C::u8_into_uimm5(ctx, expr7_0); - let expr9_0: bool = false; + let expr6_0 = C::emit(ctx, &expr5_0); + let expr7_0 = constructor_size_from_ty(ctx, pattern0_0)?; + let expr8_0: u8 = 1; + let expr9_0 = C::u8_into_uimm5(ctx, expr8_0); let expr10_0: bool = false; let expr11_0: bool = false; let expr12_0: bool = false; - let expr13_0 = C::nzcv(ctx, expr9_0, expr10_0, expr11_0, expr12_0); - let expr14_0 = Cond::Eq; - let expr15_0 = MInst::CCmpImm { - size: expr6_0, + let expr13_0: bool = false; + let expr14_0 = C::nzcv(ctx, expr10_0, expr11_0, expr12_0, expr13_0); + let expr15_0 = Cond::Eq; + let expr16_0 = MInst::CCmpImm { + size: expr7_0, rn: pattern1_0, - imm: expr8_0, - nzcv: expr13_0, - cond: expr14_0, + imm: expr9_0, + nzcv: expr14_0, + cond: expr15_0, }; - let expr16_0 = C::emit(ctx, &expr15_0); - let expr17_0 = Cond::Vs; - let expr18_0 = C::cond_br_cond(ctx, &expr17_0); - let expr19_0 = C::trap_code_integer_overflow(ctx); - let expr20_0 = MInst::TrapIf { - kind: expr18_0, - trap_code: expr19_0, + let expr17_0 = C::emit(ctx, &expr16_0); + let expr18_0 = Cond::Vs; + let expr19_0 = C::cond_br_cond(ctx, &expr18_0); + let expr20_0 = C::trap_code_integer_overflow(ctx); + let expr21_0 = MInst::TrapIf { + kind: expr19_0, + trap_code: expr20_0, }; - let expr21_0 = C::emit(ctx, &expr20_0); + let expr22_0 = C::emit(ctx, &expr21_0); return Some(pattern1_0); } -// Generated as internal constructor for term adds_op. -pub fn constructor_adds_op(ctx: &mut C, arg0: Type) -> Option { - let pattern0_0 = arg0; - if pattern0_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2053. - let expr0_0 = ALUOp::AddS64; - return Some(expr0_0); - } - if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - // Rule at src/isa/aarch64/inst.isle line 2052. - let expr0_0 = ALUOp::AddS32; - return Some(expr0_0); - } - return None; -} - // Generated as internal constructor for term alu_rs_imm_logic_commutative. pub fn constructor_alu_rs_imm_logic_commutative( ctx: &mut C, @@ -3482,10 +3161,11 @@ pub fn constructor_alu_rs_imm_logic_commutative( C::imm_logic_from_imm64(ctx, pattern5_1, pattern7_0) { let pattern9_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 2083. + // Rule at src/isa/aarch64/inst.isle line 1970. let expr0_0 = C::put_in_reg(ctx, pattern9_0); - let expr1_0 = - constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern8_0)?; + let expr1_0 = constructor_alu_rr_imm_logic( + ctx, pattern0_0, pattern1_0, expr0_0, pattern8_0, + )?; return Some(expr1_0); } } @@ -3513,12 +3193,13 @@ pub fn constructor_alu_rs_imm_logic_commutative( C::lshl_from_imm64(ctx, pattern10_1, pattern12_0) { let pattern14_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 2089. + // Rule at src/isa/aarch64/inst.isle line 1976. 
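(Illustrative aside, not part of the patch.) The rewritten trap_if_div_overflow above builds its flag-setting add directly from ALUOp::AddS plus an operand size instead of the removed adds_op helper. As a reading aid, here is the condition the AddS / CCmpImm / TrapIf sequence appears to guard, sketched in plain Rust rather than in terms of the emitted instructions:

// Sketch of the overflow condition, under my reading of the sequence above:
// the flag-setting add of the divisor with 1 detects a divisor of -1, and the
// conditional compare of the dividend with 1 overflows (sets V) exactly when
// the dividend is the minimum value, so the trap fires only for MIN / -1.
fn sdiv_would_overflow(dividend: i64, divisor: i64) -> bool {
    divisor == -1 && dividend.checked_sub(1).is_none()
}

fn main() {
    assert!(sdiv_would_overflow(i64::MIN, -1));
    assert!(!sdiv_would_overflow(i64::MIN, 2));
    assert!(!sdiv_would_overflow(-1, -1));
    assert_eq!(i64::MIN.checked_div(-1), None); // Rust's own overflow rule agrees
}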
let expr0_0 = C::put_in_reg(ctx, pattern14_0); let expr1_0 = C::put_in_reg(ctx, pattern7_0); let expr2_0 = constructor_alu_rrr_shift( ctx, pattern0_0, + pattern1_0, expr0_0, expr1_0, pattern13_0, @@ -3550,10 +3231,11 @@ pub fn constructor_alu_rs_imm_logic_commutative( if let Some(pattern9_0) = C::imm_logic_from_imm64(ctx, pattern6_1, pattern8_0) { - // Rule at src/isa/aarch64/inst.isle line 2081. + // Rule at src/isa/aarch64/inst.isle line 1968. let expr0_0 = C::put_in_reg(ctx, pattern2_0); - let expr1_0 = - constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern9_0)?; + let expr1_0 = constructor_alu_rr_imm_logic( + ctx, pattern0_0, pattern1_0, expr0_0, pattern9_0, + )?; return Some(expr1_0); } } @@ -3580,12 +3262,13 @@ pub fn constructor_alu_rs_imm_logic_commutative( if let Some(pattern14_0) = C::lshl_from_imm64(ctx, pattern11_1, pattern13_0) { - // Rule at src/isa/aarch64/inst.isle line 2087. + // Rule at src/isa/aarch64/inst.isle line 1974. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern8_0); let expr2_0 = constructor_alu_rrr_shift( ctx, pattern0_0, + pattern1_0, expr0_0, expr1_0, pattern14_0, @@ -3601,10 +3284,10 @@ pub fn constructor_alu_rs_imm_logic_commutative( _ => {} } } - // Rule at src/isa/aarch64/inst.isle line 2077. + // Rule at src/isa/aarch64/inst.isle line 1964. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern3_0); - let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, expr1_0)?; + let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, pattern1_0, expr0_0, expr1_0)?; return Some(expr2_0); } @@ -3635,10 +3318,11 @@ pub fn constructor_alu_rs_imm_logic( if let Some(pattern9_0) = C::imm_logic_from_imm64(ctx, pattern6_1, pattern8_0) { - // Rule at src/isa/aarch64/inst.isle line 2097. + // Rule at src/isa/aarch64/inst.isle line 1984. let expr0_0 = C::put_in_reg(ctx, pattern2_0); - let expr1_0 = - constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern9_0)?; + let expr1_0 = constructor_alu_rr_imm_logic( + ctx, pattern0_0, pattern1_0, expr0_0, pattern9_0, + )?; return Some(expr1_0); } } @@ -3665,12 +3349,13 @@ pub fn constructor_alu_rs_imm_logic( if let Some(pattern14_0) = C::lshl_from_imm64(ctx, pattern11_1, pattern13_0) { - // Rule at src/isa/aarch64/inst.isle line 2099. + // Rule at src/isa/aarch64/inst.isle line 1986. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern8_0); let expr2_0 = constructor_alu_rrr_shift( ctx, pattern0_0, + pattern1_0, expr0_0, expr1_0, pattern14_0, @@ -3686,10 +3371,10 @@ pub fn constructor_alu_rs_imm_logic( _ => {} } } - // Rule at src/isa/aarch64/inst.isle line 2095. + // Rule at src/isa/aarch64/inst.isle line 1982. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern3_0); - let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, expr1_0)?; + let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, pattern1_0, expr0_0, expr1_0)?; return Some(expr2_0); } @@ -3697,25 +3382,27 @@ pub fn constructor_alu_rs_imm_logic( pub fn constructor_i128_alu_bitop( ctx: &mut C, arg0: &ALUOp, - arg1: Value, + arg1: Type, arg2: Value, + arg3: Value, ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 2107. - let expr0_0 = C::put_in_regs(ctx, pattern1_0); + let pattern3_0 = arg3; + // Rule at src/isa/aarch64/inst.isle line 1994. 
+ let expr0_0 = C::put_in_regs(ctx, pattern2_0); let expr1_0: usize = 0; let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0); let expr3_0: usize = 1; let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0); - let expr5_0 = C::put_in_regs(ctx, pattern2_0); + let expr5_0 = C::put_in_regs(ctx, pattern3_0); let expr6_0: usize = 0; let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0); let expr8_0: usize = 1; let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0); - let expr10_0 = constructor_alu_rrr(ctx, pattern0_0, expr2_0, expr7_0)?; - let expr11_0 = constructor_alu_rrr(ctx, pattern0_0, expr4_0, expr9_0)?; + let expr10_0 = constructor_alu_rrr(ctx, pattern0_0, pattern1_0, expr2_0, expr7_0)?; + let expr11_0 = constructor_alu_rrr(ctx, pattern0_0, pattern1_0, expr4_0, expr9_0)?; let expr12_0 = C::value_regs(ctx, expr10_0, expr11_0); return Some(expr12_0); } @@ -3735,45 +3422,49 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { // Rule at src/isa/aarch64/lower.isle line 1013. - let expr0_0 = C::put_in_reg(ctx, pattern5_1); - let expr1_0 = constructor_rbit32(ctx, expr0_0)?; - let expr2_0: u8 = 24; - let expr3_0 = C::imm_shift_from_u8(ctx, expr2_0); - let expr4_0 = constructor_lsr32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_rbit32(ctx, expr1_0)?; + let expr3_0: u8 = 24; + let expr4_0 = C::imm_shift_from_u8(ctx, expr3_0); + let expr5_0 = constructor_lsr_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Clz => { // Rule at src/isa/aarch64/lower.isle line 1038. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; - let expr1_0 = constructor_clz32(ctx, expr0_0)?; - let expr2_0: u8 = 24; - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = constructor_sub32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; + let expr2_0 = constructor_clz32(ctx, expr1_0)?; + let expr3_0: u8 = 24; + let expr4_0 = C::u8_into_imm12(ctx, expr3_0); + let expr5_0 = constructor_sub_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Cls => { // Rule at src/isa/aarch64/lower.isle line 1095. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; - let expr1_0 = constructor_cls32(ctx, expr0_0)?; - let expr2_0: u8 = 24; - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = constructor_sub32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; + let expr2_0 = constructor_cls32(ctx, expr1_0)?; + let expr3_0: u8 = 24; + let expr4_0 = C::u8_into_imm12(ctx, expr3_0); + let expr5_0 = constructor_sub_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Ctz => { // Rule at src/isa/aarch64/lower.isle line 1073. 
- let expr0_0 = C::put_in_reg(ctx, pattern5_1); - let expr1_0 = constructor_rbit32(ctx, expr0_0)?; - let expr2_0: Type = I32; - let expr3_0: u64 = 8388608; - let expr4_0 = C::u64_into_imm_logic(ctx, expr2_0, expr3_0); - let expr5_0 = constructor_orr32_imm(ctx, expr1_0, expr4_0)?; - let expr6_0 = constructor_clz32(ctx, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); + let expr0_0: Type = I32; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_rbit32(ctx, expr1_0)?; + let expr3_0: Type = I32; + let expr4_0: u64 = 8388608; + let expr5_0 = C::u64_into_imm_logic(ctx, expr3_0, expr4_0); + let expr6_0 = constructor_orr_imm(ctx, expr0_0, expr2_0, expr5_0)?; + let expr7_0 = constructor_clz32(ctx, expr6_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); } &Opcode::Popcnt => { // Rule at src/isa/aarch64/lower.isle line 1152. @@ -3802,45 +3493,49 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { // Rule at src/isa/aarch64/lower.isle line 1019. - let expr0_0 = C::put_in_reg(ctx, pattern5_1); - let expr1_0 = constructor_rbit32(ctx, expr0_0)?; - let expr2_0: u8 = 16; - let expr3_0 = C::imm_shift_from_u8(ctx, expr2_0); - let expr4_0 = constructor_lsr32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_rbit32(ctx, expr1_0)?; + let expr3_0: u8 = 16; + let expr4_0 = C::imm_shift_from_u8(ctx, expr3_0); + let expr5_0 = constructor_lsr_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Clz => { // Rule at src/isa/aarch64/lower.isle line 1041. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; - let expr1_0 = constructor_clz32(ctx, expr0_0)?; - let expr2_0: u8 = 16; - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = constructor_sub32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; + let expr2_0 = constructor_clz32(ctx, expr1_0)?; + let expr3_0: u8 = 16; + let expr4_0 = C::u8_into_imm12(ctx, expr3_0); + let expr5_0 = constructor_sub_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Cls => { // Rule at src/isa/aarch64/lower.isle line 1098. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; - let expr1_0 = constructor_cls32(ctx, expr0_0)?; - let expr2_0: u8 = 16; - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = constructor_sub32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); + let expr0_0: Type = I32; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern5_1)?; + let expr2_0 = constructor_cls32(ctx, expr1_0)?; + let expr3_0: u8 = 16; + let expr4_0 = C::u8_into_imm12(ctx, expr3_0); + let expr5_0 = constructor_sub_imm(ctx, expr0_0, expr2_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); } &Opcode::Ctz => { // Rule at src/isa/aarch64/lower.isle line 1076. 
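(Illustrative aside, not part of the patch.) The Ctz lowerings for the narrow integer types above and below reverse the bits of the 32-bit register, OR in a single guard bit just below the reversed lane (8388608 = bit 23 for i8, 32768 = bit 15 for i16), and then reuse clz32. A small sketch of why that works, using Rust integer operations instead of the emitted rbit/orr/clz sequence:

// Sketch only: after reversing the 32-bit register, the type's `width`
// interesting bits sit at the top, and the guard bit right below them caps
// the leading-zero count at `width` (which handles a zero input correctly).
fn ctz_narrow(x: u32, width: u32) -> u32 {
    let guard = 1u32 << (31 - width);
    (x.reverse_bits() | guard).leading_zeros()
}

fn main() {
    assert_eq!(ctz_narrow(0b0000_0100, 8), 2); // i8 case, guard = 0x0080_0000
    assert_eq!(ctz_narrow(0, 8), 8);           // zero input counts the full width
    assert_eq!(ctz_narrow(0x8000, 16), 15);    // i16 case, guard = 0x0000_8000
    assert_eq!(ctz_narrow(0, 16), 16);
}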
- let expr0_0 = C::put_in_reg(ctx, pattern5_1); - let expr1_0 = constructor_rbit32(ctx, expr0_0)?; - let expr2_0: Type = I32; - let expr3_0: u64 = 32768; - let expr4_0 = C::u64_into_imm_logic(ctx, expr2_0, expr3_0); - let expr5_0 = constructor_orr32_imm(ctx, expr1_0, expr4_0)?; - let expr6_0 = constructor_clz32(ctx, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); + let expr0_0: Type = I32; + let expr1_0 = C::put_in_reg(ctx, pattern5_1); + let expr2_0 = constructor_rbit32(ctx, expr1_0)?; + let expr3_0: Type = I32; + let expr4_0: u64 = 32768; + let expr5_0 = C::u64_into_imm_logic(ctx, expr3_0, expr4_0); + let expr6_0 = constructor_orr_imm(ctx, expr0_0, expr2_0, expr5_0)?; + let expr7_0 = constructor_clz32(ctx, expr6_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); } &Opcode::Popcnt => { // Rule at src/isa/aarch64/lower.isle line 1160. @@ -3891,27 +3586,31 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -3935,25 +3634,28 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option {} } @@ -4026,27 +3728,29 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 360. - let expr0_0 = C::put_in_reg(ctx, pattern7_0); - let expr1_0 = C::put_in_reg(ctx, pattern7_1); - let expr2_0 = constructor_smulh(ctx, expr0_0, expr1_0)?; - let expr3_0 = C::value_reg(ctx, expr2_0); - return Some(expr3_0); + let expr0_0: Type = I64; + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = C::put_in_reg(ctx, pattern7_1); + let expr3_0 = constructor_smulh(ctx, expr0_0, expr1_0, expr2_0)?; + let expr4_0 = C::value_reg(ctx, expr3_0); + return Some(expr4_0); } &Opcode::Band => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 609. - let expr0_0 = ALUOp::And64; + let expr0_0 = ALUOp::And; let expr1_0: Type = I64; let expr2_0 = constructor_alu_rs_imm_logic_commutative( ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, @@ -4058,7 +3762,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -4180,31 +3888,34 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 683. - let expr0_0 = ALUOp::Lsl64; + let expr0_0 = ALUOp::Lsl; let expr1_0: Type = I64; let expr2_0 = C::put_in_reg(ctx, pattern7_0); let expr3_0 = @@ -4216,7 +3927,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -4336,10 +4050,13 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -4355,67 +4072,80 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 612. 
- let expr0_0 = ALUOp::And64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::And; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::Bor => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 625. - let expr0_0 = ALUOp::Orr64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::Orr; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::Bxor => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 638. - let expr0_0 = ALUOp::Eor64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::Eor; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::BandNot => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 651. - let expr0_0 = ALUOp::AndNot64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::AndNot; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::BorNot => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 664. - let expr0_0 = ALUOp::OrrNot64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::OrrNot; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::BxorNot => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 674. - let expr0_0 = ALUOp::EorNot64; - let expr1_0 = - constructor_i128_alu_bitop(ctx, &expr0_0, pattern7_0, pattern7_1)?; - return Some(expr1_0); + let expr0_0 = ALUOp::EorNot; + let expr1_0: Type = I64; + let expr2_0 = constructor_i128_alu_bitop( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, + )?; + return Some(expr2_0); } &Opcode::Rotl => { let (pattern7_0, pattern7_1) = @@ -4426,23 +4156,26 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -4453,23 +4186,26 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -4519,12 +4255,14 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { // Rule at src/isa/aarch64/lower.isle line 1028. @@ -4553,24 +4291,27 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { // Rule at src/isa/aarch64/lower.isle line 1085. 
@@ -4671,12 +4412,14 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option {} } @@ -5318,13 +5064,14 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); @@ -5385,11 +5132,12 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); @@ -5398,16 +5146,17 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 606. - let expr0_0 = ALUOp::And32; + let expr0_0 = ALUOp::And; let expr1_0 = constructor_alu_rs_imm_logic_commutative( ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, )?; @@ -5417,7 +5166,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 619. - let expr0_0 = ALUOp::Orr32; + let expr0_0 = ALUOp::Orr; let expr1_0 = constructor_alu_rs_imm_logic_commutative( ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, )?; @@ -5427,7 +5176,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 632. - let expr0_0 = ALUOp::Eor32; + let expr0_0 = ALUOp::Eor; let expr1_0 = constructor_alu_rs_imm_logic_commutative( ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, )?; @@ -5437,7 +5186,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 645. - let expr0_0 = ALUOp::AndNot32; + let expr0_0 = ALUOp::AndNot; let expr1_0 = constructor_alu_rs_imm_logic( ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, )?; @@ -5447,7 +5196,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 658. - let expr0_0 = ALUOp::OrrNot32; + let expr0_0 = ALUOp::OrrNot; let expr1_0 = constructor_alu_rs_imm_logic( ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, )?; @@ -5457,17 +5206,18 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 668. - let expr0_0 = ALUOp::EorNot32; - let expr1_0 = constructor_alu_rs_imm_logic( - ctx, &expr0_0, pattern3_0, pattern7_0, pattern7_1, + let expr0_0 = ALUOp::EorNot; + let expr1_0: Type = I32; + let expr2_0 = constructor_alu_rs_imm_logic( + ctx, &expr0_0, expr1_0, pattern7_0, pattern7_1, )?; - let expr2_0 = C::value_reg(ctx, expr1_0); - return Some(expr2_0); + let expr3_0 = C::value_reg(ctx, expr2_0); + return Some(expr3_0); } &Opcode::Ishl => { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 679. - let expr0_0 = ALUOp::Lsl32; + let expr0_0 = ALUOp::Lsl; let expr1_0 = C::put_in_reg(ctx, pattern7_0); let expr2_0 = constructor_do_shift(ctx, &expr0_0, pattern3_0, expr1_0, pattern7_1)?; @@ -5477,7 +5227,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 767. 
- let expr0_0 = ALUOp::Lsr32; + let expr0_0 = ALUOp::Lsr; let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; let expr2_0 = constructor_do_shift(ctx, &expr0_0, pattern3_0, expr1_0, pattern7_1)?; @@ -5487,7 +5237,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 816. - let expr0_0 = ALUOp::Asr32; + let expr0_0 = ALUOp::Asr; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; let expr2_0 = constructor_do_shift(ctx, &expr0_0, pattern3_0, expr1_0, pattern7_1)?; @@ -5887,11 +5637,12 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -5908,14 +5659,16 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -5936,10 +5690,11 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = @@ -5947,10 +5702,11 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option {} } @@ -6230,28 +5986,30 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); // Rule at src/isa/aarch64/lower.isle line 830. let expr0_0 = constructor_vector_size(ctx, pattern3_0)?; - let expr1_0 = C::zero_reg(ctx); - let expr2_0 = C::put_in_reg(ctx, pattern7_1); - let expr3_0 = constructor_sub32(ctx, expr1_0, expr2_0)?; - let expr4_0 = constructor_vec_dup(ctx, expr3_0, &expr0_0)?; - let expr5_0 = C::put_in_reg(ctx, pattern7_0); - let expr6_0 = constructor_sshl(ctx, expr5_0, expr4_0, &expr0_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); + let expr1_0: Type = I32; + let expr2_0 = C::zero_reg(ctx); + let expr3_0 = C::put_in_reg(ctx, pattern7_1); + let expr4_0 = constructor_sub(ctx, expr1_0, expr2_0, expr3_0)?; + let expr5_0 = constructor_vec_dup(ctx, expr4_0, &expr0_0)?; + let expr6_0 = C::put_in_reg(ctx, pattern7_0); + let expr7_0 = constructor_sshl(ctx, expr6_0, expr5_0, &expr0_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); } _ => {} } @@ -6371,26 +6129,33 @@ pub fn constructor_lower_shl128( let expr1_0 = C::value_regs_get(ctx, pattern0_0, expr0_0); let expr2_0: usize = 1; let expr3_0 = C::value_regs_get(ctx, pattern0_0, expr2_0); - let expr4_0 = constructor_lsl64(ctx, expr1_0, pattern1_0)?; - let expr5_0 = constructor_lsl64(ctx, expr3_0, pattern1_0)?; - let expr6_0 = C::zero_reg(ctx); - let expr7_0 = constructor_orr_not32(ctx, expr6_0, pattern1_0)?; - let expr8_0: u8 = 1; - let expr9_0 = C::imm_shift_from_u8(ctx, expr8_0); - let expr10_0 = constructor_lsr64_imm(ctx, expr1_0, expr9_0)?; - let expr11_0 = constructor_lsr64(ctx, expr10_0, expr7_0)?; - let expr12_0 = constructor_orr64(ctx, expr5_0, expr11_0)?; - let expr13_0: Type = I64; - let expr14_0: u64 = 64; - let expr15_0 = C::u64_into_imm_logic(ctx, expr13_0, expr14_0); - let expr16_0 = constructor_tst64_imm(ctx, pattern1_0, expr15_0)?; - let expr17_0 = Cond::Ne; - let expr18_0 = C::zero_reg(ctx); - let expr19_0 = constructor_csel(ctx, &expr17_0, expr18_0, expr4_0)?; - let expr20_0 = Cond::Ne; - let expr21_0 = constructor_csel(ctx, &expr20_0, expr4_0, expr12_0)?; - let expr22_0 = constructor_with_flags_2(ctx, &expr16_0, &expr19_0, &expr21_0)?; - return Some(expr22_0); + let expr4_0: Type = I64; + let expr5_0 = constructor_lsl(ctx, expr4_0, expr1_0, pattern1_0)?; + let expr6_0: Type = I64; + let expr7_0 = constructor_lsl(ctx, 
expr6_0, expr3_0, pattern1_0)?; + let expr8_0: Type = I32; + let expr9_0 = C::zero_reg(ctx); + let expr10_0 = constructor_orr_not(ctx, expr8_0, expr9_0, pattern1_0)?; + let expr11_0: Type = I64; + let expr12_0: Type = I64; + let expr13_0: u8 = 1; + let expr14_0 = C::imm_shift_from_u8(ctx, expr13_0); + let expr15_0 = constructor_lsr_imm(ctx, expr12_0, expr1_0, expr14_0)?; + let expr16_0 = constructor_lsr(ctx, expr11_0, expr15_0, expr10_0)?; + let expr17_0: Type = I64; + let expr18_0 = constructor_orr(ctx, expr17_0, expr7_0, expr16_0)?; + let expr19_0: Type = I64; + let expr20_0: Type = I64; + let expr21_0: u64 = 64; + let expr22_0 = C::u64_into_imm_logic(ctx, expr20_0, expr21_0); + let expr23_0 = constructor_tst_imm(ctx, expr19_0, pattern1_0, expr22_0)?; + let expr24_0 = Cond::Ne; + let expr25_0 = C::zero_reg(ctx); + let expr26_0 = constructor_csel(ctx, &expr24_0, expr25_0, expr5_0)?; + let expr27_0 = Cond::Ne; + let expr28_0 = constructor_csel(ctx, &expr27_0, expr5_0, expr18_0)?; + let expr29_0 = constructor_with_flags_2(ctx, &expr23_0, &expr26_0, &expr28_0)?; + return Some(expr29_0); } // Generated as internal constructor for term do_shift. @@ -6419,8 +6184,9 @@ pub fn constructor_do_shift( if let Some(pattern8_0) = closure8() { if let Some(pattern9_0) = C::imm_shift_from_imm64(ctx, pattern6_1, pattern8_0) { // Rule at src/isa/aarch64/lower.isle line 761. - let expr0_0 = - constructor_alu_rr_imm_shift(ctx, pattern0_0, pattern2_0, pattern9_0)?; + let expr0_0 = constructor_alu_rr_imm_shift( + ctx, pattern0_0, pattern1_0, pattern2_0, pattern9_0, + )?; return Some(expr0_0); } } @@ -6433,21 +6199,23 @@ pub fn constructor_do_shift( let pattern3_0 = arg2; let pattern4_0 = arg3; // Rule at src/isa/aarch64/lower.isle line 752. - let expr0_0 = C::put_in_regs(ctx, pattern4_0); - let expr1_0: usize = 0; - let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0); - let expr3_0 = constructor_alu_rrr(ctx, pattern0_0, pattern3_0, expr2_0)?; - return Some(expr3_0); + let expr0_0: Type = I32; + let expr1_0 = C::put_in_regs(ctx, pattern4_0); + let expr2_0: usize = 0; + let expr3_0 = C::value_regs_get(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, pattern3_0, expr3_0)?; + return Some(expr4_0); } if pattern1_0 == I64 { let pattern3_0 = arg2; let pattern4_0 = arg3; // Rule at src/isa/aarch64/lower.isle line 753. 
- let expr0_0 = C::put_in_regs(ctx, pattern4_0); - let expr1_0: usize = 0; - let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0); - let expr3_0 = constructor_alu_rrr(ctx, pattern0_0, pattern3_0, expr2_0)?; - return Some(expr3_0); + let expr0_0: Type = I64; + let expr1_0 = C::put_in_regs(ctx, pattern4_0); + let expr2_0: usize = 0; + let expr3_0 = C::value_regs_get(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, pattern3_0, expr3_0)?; + return Some(expr4_0); } if let Some(pattern2_0) = C::fits_in_16(ctx, pattern1_0) { let pattern3_0 = arg2; @@ -6456,10 +6224,12 @@ pub fn constructor_do_shift( let expr0_0 = C::put_in_regs(ctx, pattern4_0); let expr1_0: usize = 0; let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0); - let expr3_0 = C::shift_mask(ctx, pattern2_0); - let expr4_0 = constructor_and32_imm(ctx, expr2_0, expr3_0)?; - let expr5_0 = constructor_alu_rrr(ctx, pattern0_0, pattern3_0, expr4_0)?; - return Some(expr5_0); + let expr3_0: Type = I32; + let expr4_0 = C::shift_mask(ctx, pattern2_0); + let expr5_0 = constructor_and_imm(ctx, expr3_0, expr2_0, expr4_0)?; + let expr6_0: Type = I32; + let expr7_0 = constructor_alu_rrr(ctx, pattern0_0, expr6_0, pattern3_0, expr5_0)?; + return Some(expr7_0); } return None; } @@ -6477,26 +6247,33 @@ pub fn constructor_lower_ushr128( let expr1_0 = C::value_regs_get(ctx, pattern0_0, expr0_0); let expr2_0: usize = 1; let expr3_0 = C::value_regs_get(ctx, pattern0_0, expr2_0); - let expr4_0 = constructor_lsr64(ctx, expr1_0, pattern1_0)?; - let expr5_0 = constructor_lsr64(ctx, expr3_0, pattern1_0)?; - let expr6_0 = C::zero_reg(ctx); - let expr7_0 = constructor_orr_not32(ctx, expr6_0, pattern1_0)?; - let expr8_0: u8 = 1; - let expr9_0 = C::imm_shift_from_u8(ctx, expr8_0); - let expr10_0 = constructor_lsl64_imm(ctx, expr3_0, expr9_0)?; - let expr11_0 = constructor_lsl64(ctx, expr10_0, expr7_0)?; - let expr12_0 = constructor_orr64(ctx, expr4_0, expr11_0)?; - let expr13_0: Type = I64; - let expr14_0: u64 = 64; - let expr15_0 = C::u64_into_imm_logic(ctx, expr13_0, expr14_0); - let expr16_0 = constructor_tst64_imm(ctx, pattern1_0, expr15_0)?; - let expr17_0 = Cond::Ne; - let expr18_0 = constructor_csel(ctx, &expr17_0, expr5_0, expr12_0)?; - let expr19_0 = Cond::Ne; - let expr20_0 = C::zero_reg(ctx); - let expr21_0 = constructor_csel(ctx, &expr19_0, expr20_0, expr5_0)?; - let expr22_0 = constructor_with_flags_2(ctx, &expr16_0, &expr18_0, &expr21_0)?; - return Some(expr22_0); + let expr4_0: Type = I64; + let expr5_0 = constructor_lsr(ctx, expr4_0, expr1_0, pattern1_0)?; + let expr6_0: Type = I64; + let expr7_0 = constructor_lsr(ctx, expr6_0, expr3_0, pattern1_0)?; + let expr8_0: Type = I32; + let expr9_0 = C::zero_reg(ctx); + let expr10_0 = constructor_orr_not(ctx, expr8_0, expr9_0, pattern1_0)?; + let expr11_0: Type = I64; + let expr12_0: Type = I64; + let expr13_0: u8 = 1; + let expr14_0 = C::imm_shift_from_u8(ctx, expr13_0); + let expr15_0 = constructor_lsl_imm(ctx, expr12_0, expr3_0, expr14_0)?; + let expr16_0 = constructor_lsl(ctx, expr11_0, expr15_0, expr10_0)?; + let expr17_0: Type = I64; + let expr18_0 = constructor_orr(ctx, expr17_0, expr5_0, expr16_0)?; + let expr19_0: Type = I64; + let expr20_0: Type = I64; + let expr21_0: u64 = 64; + let expr22_0 = C::u64_into_imm_logic(ctx, expr20_0, expr21_0); + let expr23_0 = constructor_tst_imm(ctx, expr19_0, pattern1_0, expr22_0)?; + let expr24_0 = Cond::Ne; + let expr25_0 = constructor_csel(ctx, &expr24_0, expr7_0, expr18_0)?; + let expr26_0 = Cond::Ne; + let expr27_0 = 
C::zero_reg(ctx); + let expr28_0 = constructor_csel(ctx, &expr26_0, expr27_0, expr7_0)?; + let expr29_0 = constructor_with_flags_2(ctx, &expr23_0, &expr25_0, &expr28_0)?; + return Some(expr29_0); } // Generated as internal constructor for term lower_sshr128. @@ -6512,28 +6289,36 @@ pub fn constructor_lower_sshr128( let expr1_0 = C::value_regs_get(ctx, pattern0_0, expr0_0); let expr2_0: usize = 1; let expr3_0 = C::value_regs_get(ctx, pattern0_0, expr2_0); - let expr4_0 = constructor_lsr64(ctx, expr1_0, pattern1_0)?; - let expr5_0 = constructor_asr64(ctx, expr3_0, pattern1_0)?; - let expr6_0 = C::zero_reg(ctx); - let expr7_0 = constructor_orr_not32(ctx, expr6_0, pattern1_0)?; - let expr8_0: u8 = 1; - let expr9_0 = C::imm_shift_from_u8(ctx, expr8_0); - let expr10_0 = constructor_lsl64_imm(ctx, expr3_0, expr9_0)?; - let expr11_0 = constructor_lsl64(ctx, expr10_0, expr7_0)?; - let expr12_0: u8 = 63; - let expr13_0 = C::imm_shift_from_u8(ctx, expr12_0); - let expr14_0 = constructor_asr64_imm(ctx, expr3_0, expr13_0)?; - let expr15_0 = constructor_orr64(ctx, expr4_0, expr11_0)?; - let expr16_0: Type = I64; - let expr17_0: u64 = 64; - let expr18_0 = C::u64_into_imm_logic(ctx, expr16_0, expr17_0); - let expr19_0 = constructor_tst64_imm(ctx, pattern1_0, expr18_0)?; - let expr20_0 = Cond::Ne; - let expr21_0 = constructor_csel(ctx, &expr20_0, expr5_0, expr15_0)?; - let expr22_0 = Cond::Ne; - let expr23_0 = constructor_csel(ctx, &expr22_0, expr14_0, expr5_0)?; - let expr24_0 = constructor_with_flags_2(ctx, &expr19_0, &expr21_0, &expr23_0)?; - return Some(expr24_0); + let expr4_0: Type = I64; + let expr5_0 = constructor_lsr(ctx, expr4_0, expr1_0, pattern1_0)?; + let expr6_0: Type = I64; + let expr7_0 = constructor_asr(ctx, expr6_0, expr3_0, pattern1_0)?; + let expr8_0: Type = I32; + let expr9_0 = C::zero_reg(ctx); + let expr10_0 = constructor_orr_not(ctx, expr8_0, expr9_0, pattern1_0)?; + let expr11_0: Type = I64; + let expr12_0: Type = I64; + let expr13_0: u8 = 1; + let expr14_0 = C::imm_shift_from_u8(ctx, expr13_0); + let expr15_0 = constructor_lsl_imm(ctx, expr12_0, expr3_0, expr14_0)?; + let expr16_0 = constructor_lsl(ctx, expr11_0, expr15_0, expr10_0)?; + let expr17_0: Type = I64; + let expr18_0: u8 = 63; + let expr19_0 = C::imm_shift_from_u8(ctx, expr18_0); + let expr20_0 = constructor_asr_imm(ctx, expr17_0, expr3_0, expr19_0)?; + let expr21_0: Type = I64; + let expr22_0 = constructor_orr(ctx, expr21_0, expr5_0, expr16_0)?; + let expr23_0: Type = I64; + let expr24_0: Type = I64; + let expr25_0: u64 = 64; + let expr26_0 = C::u64_into_imm_logic(ctx, expr24_0, expr25_0); + let expr27_0 = constructor_tst_imm(ctx, expr23_0, pattern1_0, expr26_0)?; + let expr28_0 = Cond::Ne; + let expr29_0 = constructor_csel(ctx, &expr28_0, expr7_0, expr22_0)?; + let expr30_0 = Cond::Ne; + let expr31_0 = constructor_csel(ctx, &expr30_0, expr20_0, expr7_0)?; + let expr32_0 = constructor_with_flags_2(ctx, &expr27_0, &expr29_0, &expr31_0)?; + return Some(expr32_0); } // Generated as internal constructor for term small_rotr. @@ -6547,17 +6332,23 @@ pub fn constructor_small_rotr( let pattern1_0 = arg1; let pattern2_0 = arg2; // Rule at src/isa/aarch64/lower.isle line 960. 
- let expr0_0 = C::rotr_mask(ctx, pattern0_0); - let expr1_0 = constructor_and32_imm(ctx, pattern2_0, expr0_0)?; - let expr2_0 = C::ty_bits(ctx, pattern0_0); - let expr3_0 = C::u8_into_imm12(ctx, expr2_0); - let expr4_0 = constructor_sub32_imm(ctx, expr1_0, expr3_0)?; - let expr5_0 = C::zero_reg(ctx); - let expr6_0 = constructor_sub32(ctx, expr5_0, expr4_0)?; - let expr7_0 = constructor_lsr32(ctx, pattern1_0, expr1_0)?; - let expr8_0 = constructor_lsl32(ctx, pattern1_0, expr6_0)?; - let expr9_0 = constructor_orr32(ctx, expr8_0, expr7_0)?; - return Some(expr9_0); + let expr0_0: Type = I32; + let expr1_0 = C::rotr_mask(ctx, pattern0_0); + let expr2_0 = constructor_and_imm(ctx, expr0_0, pattern2_0, expr1_0)?; + let expr3_0: Type = I32; + let expr4_0 = C::ty_bits(ctx, pattern0_0); + let expr5_0 = C::u8_into_imm12(ctx, expr4_0); + let expr6_0 = constructor_sub_imm(ctx, expr3_0, expr2_0, expr5_0)?; + let expr7_0: Type = I32; + let expr8_0 = C::zero_reg(ctx); + let expr9_0 = constructor_sub(ctx, expr7_0, expr8_0, expr6_0)?; + let expr10_0: Type = I32; + let expr11_0 = constructor_lsr(ctx, expr10_0, pattern1_0, expr2_0)?; + let expr12_0: Type = I32; + let expr13_0 = constructor_lsl(ctx, expr12_0, pattern1_0, expr9_0)?; + let expr14_0: Type = I32; + let expr15_0 = constructor_orr(ctx, expr14_0, expr13_0, expr11_0)?; + return Some(expr15_0); } // Generated as internal constructor for term small_rotr_imm. @@ -6571,11 +6362,14 @@ pub fn constructor_small_rotr_imm( let pattern1_0 = arg1; let pattern2_0 = arg2; // Rule at src/isa/aarch64/lower.isle line 983. - let expr0_0 = constructor_lsr32_imm(ctx, pattern1_0, pattern2_0)?; - let expr1_0 = C::rotr_opposite_amount(ctx, pattern0_0, pattern2_0); - let expr2_0 = constructor_lsl32_imm(ctx, pattern1_0, expr1_0)?; - let expr3_0 = constructor_orr32(ctx, expr2_0, expr0_0)?; - return Some(expr3_0); + let expr0_0: Type = I32; + let expr1_0 = constructor_lsr_imm(ctx, expr0_0, pattern1_0, pattern2_0)?; + let expr2_0: Type = I32; + let expr3_0 = C::rotr_opposite_amount(ctx, pattern0_0, pattern2_0); + let expr4_0 = constructor_lsl_imm(ctx, expr2_0, pattern1_0, expr3_0)?; + let expr5_0: Type = I32; + let expr6_0 = constructor_orr(ctx, expr5_0, expr4_0, expr1_0)?; + return Some(expr6_0); } // Generated as internal constructor for term lower_clz128. 
@@ -6588,13 +6382,14 @@ pub fn constructor_lower_clz128(ctx: &mut C, arg0: ValueRegs) -> Opt let expr3_0: usize = 0; let expr4_0 = C::value_regs_get(ctx, pattern0_0, expr3_0); let expr5_0 = constructor_clz64(ctx, expr4_0)?; - let expr6_0: u8 = 6; - let expr7_0 = C::imm_shift_from_u8(ctx, expr6_0); - let expr8_0 = constructor_lsr64_imm(ctx, expr2_0, expr7_0)?; - let expr9_0 = constructor_madd64(ctx, expr5_0, expr8_0, expr2_0)?; - let expr10_0: Type = I64; - let expr11_0: u64 = 0; - let expr12_0 = constructor_imm(ctx, expr10_0, expr11_0)?; - let expr13_0 = C::value_regs(ctx, expr9_0, expr12_0); - return Some(expr13_0); + let expr6_0: Type = I64; + let expr7_0: u8 = 6; + let expr8_0 = C::imm_shift_from_u8(ctx, expr7_0); + let expr9_0 = constructor_lsr_imm(ctx, expr6_0, expr2_0, expr8_0)?; + let expr10_0 = constructor_madd64(ctx, expr5_0, expr9_0, expr2_0)?; + let expr11_0: Type = I64; + let expr12_0: u64 = 0; + let expr13_0 = constructor_imm(ctx, expr11_0, expr12_0)?; + let expr14_0 = C::value_regs(ctx, expr10_0, expr13_0); + return Some(expr14_0); } diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs index 8fc549ac2e..4f03159b81 100644 --- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs +++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs @@ -410,16 +410,17 @@ pub(crate) fn lower_insn_to_regs>( lower_fcmp_or_ffcmp_to_flags(ctx, fcmp_insn); cond } else { - let (cmp_op, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 { - (ALUOp::SubS64, NarrowValueMode::ZeroExtend64) + let (size, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 { + (OperandSize::Size64, NarrowValueMode::ZeroExtend64) } else { - (ALUOp::SubS32, NarrowValueMode::ZeroExtend32) + (OperandSize::Size32, NarrowValueMode::ZeroExtend32) }; let rcond = put_input_in_reg(ctx, inputs[0], narrow_mode); // cmp rcond, #0 ctx.emit(Inst::AluRRR { - alu_op: cmp_op, + alu_op: ALUOp::SubS, + size, rd: writable_zero_reg(), rn: rcond, rm: zero_reg(), @@ -507,21 +508,24 @@ pub(crate) fn lower_insn_to_regs>( let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None); // AND rTmp, rn, rcond ctx.emit(Inst::AluRRR { - alu_op: ALUOp::And64, + alu_op: ALUOp::And, + size: OperandSize::Size64, rd: tmp, rn, rm: rcond, }); // BIC rd, rm, rcond ctx.emit(Inst::AluRRR { - alu_op: ALUOp::AndNot64, + alu_op: ALUOp::AndNot, + size: OperandSize::Size64, rd, rn: rm, rm: rcond, }); // ORR rd, rd, rTmp ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd, rn: rd.to_reg(), rm: tmp.to_reg(), @@ -571,16 +575,22 @@ pub(crate) fn lower_insn_to_regs>( let (alu_op, const_value) = match op { Opcode::IsNull => { // cmp rn, #0 - (choose_32_64(ty, ALUOp::SubS32, ALUOp::SubS64), 0) + (ALUOp::SubS, 0) } Opcode::IsInvalid => { // cmn rn, #1 - (choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64), 1) + (ALUOp::AddS, 1) } _ => unreachable!(), }; let const_value = ResultRSEImm12::Imm12(Imm12::maybe_from_u64(const_value).unwrap()); - ctx.emit(alu_inst_imm12(alu_op, writable_zero_reg(), rn, const_value)); + ctx.emit(alu_inst_imm12( + alu_op, + ty, + writable_zero_reg(), + rn, + const_value, + )); materialize_bool_result(ctx, insn, rd, Cond::Eq); } @@ -655,7 +665,8 @@ pub(crate) fn lower_insn_to_regs>( let output = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::AluRRImmLogic { - alu_op: ALUOp::And32, + alu_op: ALUOp::And, + size: OperandSize::Size32, rd: output.regs()[0], rn: input.regs()[0], imml: ImmLogic::maybe_from_u64(1, I32).unwrap(), @@ -1164,7 +1175,8 @@ 
pub(crate) fn lower_insn_to_regs>( }); ctx.emit(Inst::AluRRImm12 { - alu_op: ALUOp::SubS64, + alu_op: ALUOp::SubS, + size: OperandSize::Size64, rd: writable_zero_reg(), rn: rd.to_reg(), imm12: Imm12::zero(), @@ -1267,7 +1279,8 @@ pub(crate) fn lower_insn_to_regs>( size: VectorSize::Size64x2, }); ctx.emit(Inst::AluRRImmShift { - alu_op: ALUOp::Lsl64, + alu_op: ALUOp::Lsl, + size: OperandSize::Size64, rd: tmp_r0, rn: tmp_r0.to_reg(), immshift: ImmShift { imm: 4 }, @@ -1322,7 +1335,8 @@ pub(crate) fn lower_insn_to_regs>( size: VectorSize::Size64x2, }); ctx.emit(Inst::AluRRImmShift { - alu_op: ALUOp::Lsl64, + alu_op: ALUOp::Lsl, + size: OperandSize::Size64, rd: tmp_r0, rn: tmp_r0.to_reg(), immshift: ImmShift { imm: 2 }, @@ -1372,19 +1386,22 @@ pub(crate) fn lower_insn_to_regs>( size: VectorSize::Size64x2, }); ctx.emit(Inst::AluRRImmShift { - alu_op: ALUOp::Lsr64, + alu_op: ALUOp::Lsr, + size: OperandSize::Size64, rd: dst_r, rn: dst_r.to_reg(), immshift: ImmShift::maybe_from_u64(63).unwrap(), }); ctx.emit(Inst::AluRRImmShift { - alu_op: ALUOp::Lsr64, + alu_op: ALUOp::Lsr, + size: OperandSize::Size64, rd: tmp_r0, rn: tmp_r0.to_reg(), immshift: ImmShift::maybe_from_u64(63).unwrap(), }); ctx.emit(Inst::AluRRRShift { - alu_op: ALUOp::Add32, + alu_op: ALUOp::Add, + size: OperandSize::Size32, rd: dst_r, rn: dst_r.to_reg(), rm: tmp_r0.to_reg(), @@ -2255,8 +2272,7 @@ pub(crate) fn lower_insn_to_regs>( let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None); let ty = ty.unwrap(); - let alu_op = choose_32_64(ty, ALUOp::AddS32, ALUOp::AddS64); - ctx.emit(alu_inst_imm12(alu_op, rd, rn, rm)); + ctx.emit(alu_inst_imm12(ALUOp::AddS, ty, rd, rn, rm)); } Opcode::IaddImm @@ -2572,7 +2588,8 @@ pub(crate) fn lower_branch>( let tmp = ctx.alloc_tmp(I64).only_reg().unwrap(); let input = put_input_in_regs(ctx, flag_input); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::Orr64, + alu_op: ALUOp::Orr, + size: OperandSize::Size64, rd: tmp, rn: input.regs()[0], rm: input.regs()[1], @@ -2710,7 +2727,8 @@ pub(crate) fn lower_branch>( // branch to default target below. if let Some(imm12) = Imm12::maybe_from_u64(jt_size as u64) { ctx.emit(Inst::AluRRImm12 { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_zero_reg(), rn: ridx, imm12, @@ -2718,7 +2736,8 @@ pub(crate) fn lower_branch>( } else { lower_constant_u64(ctx, rtmp1, jt_size as u64); ctx.emit(Inst::AluRRR { - alu_op: ALUOp::SubS32, + alu_op: ALUOp::SubS, + size: OperandSize::Size32, rd: writable_zero_reg(), rn: ridx, rm: rtmp1.to_reg(),
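Since the hunks above are largely mechanical, here is a minimal, self-contained Rust sketch of the pattern this patch applies. Every name below (OperandSize, ALUOp, AluRRR, operand_size, alu_rrr) is a simplified, hypothetical stand-in for the real Cranelift/ISLE definitions, and the "fits in 32 bits" threshold is my reading of the regenerated code (I32 controlling types select the 32-bit form, I64 the 64-bit form); it is only meant to illustrate how one size-agnostic opcode plus an explicit operand size replaces the old 32/64-suffixed variants, and why constructor helpers such as constructor_alu_rrr now take an extra Type argument.

// Hypothetical stand-ins for the aarch64 backend types touched by this patch;
// the real definitions differ in detail.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum OperandSize {
    Size32,
    Size64,
}

// One size-agnostic opcode per operation, instead of Add32/Add64, SubS32/SubS64, ...
#[derive(Clone, Copy, Debug)]
enum ALUOp {
    Add,
    SubS,
}

// Stand-in for deriving the operand size from the controlling type: types that
// fit in 32 bits use the 32-bit form, wider (up to 64-bit) types the 64-bit form.
fn operand_size(ty_bits: u32) -> OperandSize {
    if ty_bits <= 32 {
        OperandSize::Size32
    } else {
        OperandSize::Size64
    }
}

// Stand-in for an AluRRR-style instruction: the width now travels in its own
// `size` field rather than being baked into the opcode.
#[derive(Debug)]
struct AluRRR {
    alu_op: ALUOp,
    size: OperandSize,
    rd: u8,
    rn: u8,
    rm: u8,
}

// Stand-in for an alu_rrr-style constructor helper: callers pass the controlling
// type and the helper derives the operand size, which is why the regenerated
// rules above gain extra `let exprN_0: Type = I32;` / `I64` bindings before each call.
fn alu_rrr(alu_op: ALUOp, ty_bits: u32, rd: u8, rn: u8, rm: u8) -> AluRRR {
    AluRRR {
        alu_op,
        size: operand_size(ty_bits),
        rd,
        rn,
        rm,
    }
}

fn main() {
    // Old style: the width was baked into the opcode (e.g. SubS32 vs. SubS64).
    // New style: same opcode, explicit size derived from the controlling type.
    let cmp32 = alu_rrr(ALUOp::SubS, 32, 31, 0, 31); // roughly `subs wzr, w0, wzr`
    let cmp64 = alu_rrr(ALUOp::SubS, 64, 31, 0, 31); // roughly `subs xzr, x0, xzr`
    let add16 = alu_rrr(ALUOp::Add, 16, 0, 1, 2); // narrow types also take the 32-bit form
    assert_eq!(cmp32.size, OperandSize::Size32);
    assert_eq!(cmp64.size, OperandSize::Size64);
    assert_eq!(add16.size, OperandSize::Size32);
    println!("{:?}\n{:?}\n{:?}", cmp32, cmp64, add16);
}

The same idea shows up three ways in the hunks above: the suffixed ALUOp variants collapse into size-agnostic ones, each AluRR* instruction gains a `size: OperandSize` field at every construction site, and the constructor helpers are called with the controlling Type so the size is chosen in one place instead of at every lowering rule.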