From b5531580e78200f95c7bc8e6f672c4e32282f473 Mon Sep 17 00:00:00 2001 From: Freddie Liardet Date: Thu, 2 Dec 2021 15:38:15 +0000 Subject: [PATCH] Improve code generation for floating-point constants Copyright (c) 2022, Arm Limited. --- cranelift/codegen/src/isa/aarch64/inst.isle | 6 + .../codegen/src/isa/aarch64/inst/emit.rs | 13 + .../src/isa/aarch64/inst/emit_tests.rs | 19 + cranelift/codegen/src/isa/aarch64/inst/mod.rs | 52 ++- .../lower/isle/generated_code.manifest | 2 +- .../isa/aarch64/lower/isle/generated_code.rs | 383 +++++++++--------- .../codegen/src/isa/aarch64/lower_inst.rs | 15 +- .../filetests/isa/aarch64/constants.clif | 121 ++++++ .../filetests/isa/aarch64/fcvt-small.clif | 84 ++-- .../filetests/isa/aarch64/floating-point.clif | 104 +++-- 10 files changed, 490 insertions(+), 309 deletions(-) diff --git a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle index 784ba4a77a..839b16f7bf 100644 --- a/cranelift/codegen/src/isa/aarch64/inst.isle +++ b/cranelift/codegen/src/isa/aarch64/inst.isle @@ -454,6 +454,12 @@ (rn Reg) (size ScalarSize)) + ;; Loads a floating-point immediate. + (FpuMoveFPImm + (rd WritableReg) + (imm ASIMDFPModImm) + (size ScalarSize)) + ;; Move to a vector element from a GPR. (MovToVec (rd WritableReg) diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit.rs b/cranelift/codegen/src/isa/aarch64/inst/emit.rs index b60ed37239..bd5c470bd5 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit.rs @@ -1983,6 +1983,19 @@ impl MachInstEmit for Inst { }; sink.put4(template | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg())); } + &Inst::FpuMoveFPImm { rd, imm, size } => { + let size_code = match size { + ScalarSize::Size32 => 0b00, + ScalarSize::Size64 => 0b01, + _ => unimplemented!(), + }; + sink.put4( + 0b000_11110_00_1_00_000_000100_00000_00000 + | size_code << 22 + | ((imm.enc_bits() as u32) << 13) + | machreg_to_vec(rd.to_reg()), + ); + } &Inst::MovToVec { rd, rn, idx, size } => { let (imm5, shift) = match size.lane_size() { ScalarSize::Size8 => (0b00001, 1), diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs index 3d7016af2d..31884dc930 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs @@ -2051,6 +2051,25 @@ fn test_aarch64_binemit() { "8103271E", "fmov s1, w28", )); + insns.push(( + Inst::FpuMoveFPImm { + rd: writable_vreg(31), + imm: ASIMDFPModImm::maybe_from_u64(f64::to_bits(1.0), ScalarSize::Size64).unwrap(), + size: ScalarSize::Size64, + }, + "1F106E1E", + "fmov d31, #1", + )); + insns.push(( + Inst::FpuMoveFPImm { + rd: writable_vreg(1), + imm: ASIMDFPModImm::maybe_from_u64(f32::to_bits(31.0).into(), ScalarSize::Size32) + .unwrap(), + size: ScalarSize::Size32, + }, + "01F0271E", + "fmov s1, #31", + )); insns.push(( Inst::MovToVec { rd: writable_vreg(0), diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 66a6e536d5..27a3445016 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -239,29 +239,35 @@ impl Inst { /// Create instructions that load a 32-bit floating-point constant. 
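// Editor's note, not part of the original patch: the new FpuMoveFPImm variant relies on
// the AArch64 FMOV (scalar, immediate) form, which packs the constant into 8 bits
// (sign, 3-bit exponent, 4-bit fraction). Only values of the form
// +/- (16 + frac) / 16 * 2^exp, with frac in 0..=15 and exp in -3..=4, are encodable
// (e.g. 1.0 and 31.0 in the emit tests above); zero and everything else keep using
// MOVI or the GPR-load-plus-fmov fallback below. A minimal, self-contained sketch of
// that predicate; the helper name is hypothetical, and in the patch the real decision
// is made by ASIMDFPModImm::maybe_from_u64.
fn is_fmov_scalar_immediate(value: f64) -> bool {
    // Zero, NaN and infinities have no 8-bit FMOV encoding.
    if value == 0.0 || !value.is_finite() {
        return false;
    }
    let mag = value.abs();
    // Enumerate every encodable magnitude: (16 + frac) / 16 * 2^exp.
    (-3i32..=4).any(|exp| {
        (0u32..=15).any(|frac| mag == (16.0 + f64::from(frac)) / 16.0 * 2f64.powi(exp))
    })
}

#[test]
fn fmov_scalar_immediate_examples() {
    assert!(is_fmov_scalar_immediate(1.0)); // matches `fmov d31, #1` in the emit tests
    assert!(is_fmov_scalar_immediate(31.0)); // matches `fmov s1, #31` in the emit tests
    assert!(!is_fmov_scalar_immediate(0.1)); // needs the load-constant fallback
}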
    pub fn load_fp_constant32<F: FnMut(Type) -> Writable<Reg>>(
         rd: Writable<Reg>,
-        value: u32,
+        const_data: u32,
         mut alloc_tmp: F,
     ) -> SmallVec<[Inst; 4]> {
         // Note that we must make sure that all bits outside the lowest 32 are set to 0
         // because this function is also used to load wider constants (that have zeros
         // in their most significant bits).
-        if value == 0 {
+        if const_data == 0 {
             smallvec![Inst::VecDupImm {
                 rd,
                 imm: ASIMDMovModImm::zero(ScalarSize::Size32),
                 invert: false,
-                size: VectorSize::Size32x2
+                size: VectorSize::Size32x2,
+            }]
+        } else if let Some(imm) =
+            ASIMDFPModImm::maybe_from_u64(const_data.into(), ScalarSize::Size32)
+        {
+            smallvec![Inst::FpuMoveFPImm {
+                rd,
+                imm,
+                size: ScalarSize::Size32,
             }]
         } else {
-            // TODO: use FMOV immediate form when `value` has sufficiently few mantissa/exponent
-            // bits.
             let tmp = alloc_tmp(I32);
-            let mut insts = Inst::load_constant(tmp, value as u64);
+            let mut insts = Inst::load_constant(tmp, const_data as u64);
 
             insts.push(Inst::MovToFpu {
                 rd,
                 rn: tmp.to_reg(),
-                size: ScalarSize::Size64,
+                size: ScalarSize::Size32,
             });
 
             insts
@@ -277,11 +283,23 @@ impl Inst {
         // Note that we must make sure that all bits outside the lowest 64 are set to 0
         // because this function is also used to load wider constants (that have zeros
         // in their most significant bits).
-        if let Ok(const_data) = u32::try_from(const_data) {
+        // TODO: Treat as half of a 128 bit vector and consider replicated patterns.
+        // Scalar MOVI might also be an option.
+        if const_data == 0 {
+            smallvec![Inst::VecDupImm {
+                rd,
+                imm: ASIMDMovModImm::zero(ScalarSize::Size32),
+                invert: false,
+                size: VectorSize::Size32x2,
+            }]
+        } else if let Some(imm) = ASIMDFPModImm::maybe_from_u64(const_data, ScalarSize::Size64) {
+            smallvec![Inst::FpuMoveFPImm {
+                rd,
+                imm,
+                size: ScalarSize::Size64,
+            }]
+        } else if let Ok(const_data) = u32::try_from(const_data) {
             Inst::load_fp_constant32(rd, const_data, alloc_tmp)
-            // TODO: use FMOV immediate form when `const_data` has sufficiently few mantissa/exponent
-            // bits. Also, treat it as half of a 128-bit vector and consider replicated
-            // patterns. Scalar MOVI might also be an option.
         } else if const_data & (u32::MAX as u64) == 0 {
             let tmp = alloc_tmp(I64);
             let mut insts = Inst::load_constant(tmp, const_data);
@@ -879,6 +897,9 @@ fn aarch64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
             collector.add_def(rd);
             collector.add_use(rn);
         }
+        &Inst::FpuMoveFPImm { rd, .. } => {
+            collector.add_def(rd);
+        }
         &Inst::MovToVec { rd, rn, .. } => {
             collector.add_mod(rd);
             collector.add_use(rn);
         }
@@ -1654,6 +1675,9 @@ pub fn aarch64_map_regs<RM: RegMapper>(inst: &mut Inst, mapper: &RM) {
             mapper.map_def(rd);
             mapper.map_use(rn);
         }
+        &mut Inst::FpuMoveFPImm { ref mut rd, ..
} => { + mapper.map_def(rd); + } &mut Inst::MovToVec { ref mut rd, ref mut rn, @@ -2693,6 +2717,12 @@ impl Inst { let rn = show_ireg_sized(rn, mb_rru, operand_size); format!("fmov {}, {}", rd, rn) } + &Inst::FpuMoveFPImm { rd, imm, size } => { + let imm = imm.show_rru(mb_rru); + let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size); + + format!("fmov {}, {}", rd, imm) + } &Inst::MovToVec { rd, rn, idx, size } => { let rd = show_vreg_element(rd.to_reg(), mb_rru, idx, size); let rn = show_ireg_sized(rn, mb_rru, size.operand_size()); diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest index f75e4e5adb..09106198bb 100644 --- a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest +++ b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.manifest @@ -1,4 +1,4 @@ src/clif.isle f176ef3bba99365 src/prelude.isle 22dd5ff133398960 -src/isa/aarch64/inst.isle 5fa80451697b084f +src/isa/aarch64/inst.isle f946561093de4ff5 src/isa/aarch64/lower.isle 2d2e1e076a0c8a23 diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs index 9764b5b808..8d0ba055c4 100644 --- a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs +++ b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs @@ -459,6 +459,11 @@ pub enum MInst { rn: Reg, size: ScalarSize, }, + FpuMoveFPImm { + rd: WritableReg, + imm: ASIMDFPModImm, + size: ScalarSize, + }, MovToVec { rd: WritableReg, rn: Reg, @@ -674,7 +679,7 @@ pub enum MInst { }, } -/// Internal type ALUOp: defined at src/isa/aarch64/inst.isle line 783. +/// Internal type ALUOp: defined at src/isa/aarch64/inst.isle line 789. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum ALUOp { Add32, @@ -721,7 +726,7 @@ pub enum ALUOp { SbcS64, } -/// Internal type ALUOp3: defined at src/isa/aarch64/inst.isle line 844. +/// Internal type ALUOp3: defined at src/isa/aarch64/inst.isle line 850. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum ALUOp3 { MAdd32, @@ -730,7 +735,7 @@ pub enum ALUOp3 { MSub64, } -/// Internal type BitOp: defined at src/isa/aarch64/inst.isle line 892. +/// Internal type BitOp: defined at src/isa/aarch64/inst.isle line 898. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum BitOp { RBit32, @@ -741,7 +746,7 @@ pub enum BitOp { Cls64, } -/// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 958. +/// Internal type FPUOp1: defined at src/isa/aarch64/inst.isle line 964. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FPUOp1 { Abs32, @@ -754,7 +759,7 @@ pub enum FPUOp1 { Cvt64To32, } -/// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 971. +/// Internal type FPUOp2: defined at src/isa/aarch64/inst.isle line 977. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FPUOp2 { Add32, @@ -775,14 +780,14 @@ pub enum FPUOp2 { Uqsub64, } -/// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 996. +/// Internal type FPUOp3: defined at src/isa/aarch64/inst.isle line 1002. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FPUOp3 { MAdd32, MAdd64, } -/// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 1003. +/// Internal type FpuToIntOp: defined at src/isa/aarch64/inst.isle line 1009. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FpuToIntOp { F32ToU32, @@ -795,7 +800,7 @@ pub enum FpuToIntOp { F64ToI64, } -/// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1016. +/// Internal type IntToFpuOp: defined at src/isa/aarch64/inst.isle line 1022. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum IntToFpuOp { U32ToF32, @@ -808,7 +813,7 @@ pub enum IntToFpuOp { I64ToF64, } -/// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1030. +/// Internal type FpuRoundMode: defined at src/isa/aarch64/inst.isle line 1036. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FpuRoundMode { Minus32, @@ -821,7 +826,7 @@ pub enum FpuRoundMode { Nearest64, } -/// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1043. +/// Internal type VecExtendOp: defined at src/isa/aarch64/inst.isle line 1049. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecExtendOp { Sxtl8, @@ -832,7 +837,7 @@ pub enum VecExtendOp { Uxtl32, } -/// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1060. +/// Internal type VecALUOp: defined at src/isa/aarch64/inst.isle line 1066. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecALUOp { Sqadd, @@ -874,7 +879,7 @@ pub enum VecALUOp { Sqrdmulh, } -/// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1139. +/// Internal type VecMisc2: defined at src/isa/aarch64/inst.isle line 1145. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecMisc2 { Not, @@ -896,7 +901,7 @@ pub enum VecMisc2 { Cmeq0, } -/// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1178. +/// Internal type VecRRLongOp: defined at src/isa/aarch64/inst.isle line 1184. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecRRLongOp { Fcvtl16, @@ -906,7 +911,7 @@ pub enum VecRRLongOp { Shll32, } -/// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1193. +/// Internal type VecRRNarrowOp: defined at src/isa/aarch64/inst.isle line 1199. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecRRNarrowOp { Xtn16, @@ -925,7 +930,7 @@ pub enum VecRRNarrowOp { Fcvtn64, } -/// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1225. +/// Internal type VecRRRLongOp: defined at src/isa/aarch64/inst.isle line 1231. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecRRRLongOp { Smull8, @@ -939,13 +944,13 @@ pub enum VecRRRLongOp { Umlal32, } -/// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1242. +/// Internal type VecPairOp: defined at src/isa/aarch64/inst.isle line 1248. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecPairOp { Addp, } -/// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1250. +/// Internal type VecRRPairLongOp: defined at src/isa/aarch64/inst.isle line 1256. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecRRPairLongOp { Saddlp8, @@ -954,14 +959,14 @@ pub enum VecRRPairLongOp { Uaddlp16, } -/// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1261. +/// Internal type VecLanesOp: defined at src/isa/aarch64/inst.isle line 1267. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecLanesOp { Addv, Uminv, } -/// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1270. +/// Internal type VecShiftImmOp: defined at src/isa/aarch64/inst.isle line 1276. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum VecShiftImmOp { Shl, @@ -969,7 +974,7 @@ pub enum VecShiftImmOp { Sshr, } -/// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1281. +/// Internal type AtomicRMWOp: defined at src/isa/aarch64/inst.isle line 1287. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum AtomicRMWOp { Add, @@ -1117,28 +1122,28 @@ pub fn constructor_vector_size(ctx: &mut C, arg0: Type) -> Option( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1384. + // Rule at src/isa/aarch64/inst.isle line 1390. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovZ { @@ -1176,7 +1181,7 @@ pub fn constructor_movn( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1391. + // Rule at src/isa/aarch64/inst.isle line 1397. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovN { @@ -1199,7 +1204,7 @@ pub fn constructor_alu_rr_imm_logic( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1398. + // Rule at src/isa/aarch64/inst.isle line 1404. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRImmLogic { @@ -1223,7 +1228,7 @@ pub fn constructor_alu_rr_imm_shift( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1405. + // Rule at src/isa/aarch64/inst.isle line 1411. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRImmShift { @@ -1247,7 +1252,7 @@ pub fn constructor_alu_rrr( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1412. + // Rule at src/isa/aarch64/inst.isle line 1418. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRR { @@ -1273,7 +1278,7 @@ pub fn constructor_vec_rrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1419. + // Rule at src/isa/aarch64/inst.isle line 1425. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRR { @@ -1298,7 +1303,7 @@ pub fn constructor_vec_lanes( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1426. + // Rule at src/isa/aarch64/inst.isle line 1432. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecLanes { @@ -1316,7 +1321,7 @@ pub fn constructor_vec_lanes( pub fn constructor_vec_dup(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1433. + // Rule at src/isa/aarch64/inst.isle line 1439. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecDup { @@ -1339,7 +1344,7 @@ pub fn constructor_alu_rr_imm12( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1440. + // Rule at src/isa/aarch64/inst.isle line 1446. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRImm12 { @@ -1365,7 +1370,7 @@ pub fn constructor_alu_rrr_shift( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1447. + // Rule at src/isa/aarch64/inst.isle line 1453. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRRShift { @@ -1392,7 +1397,7 @@ pub fn constructor_alu_rrr_extend( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1454. + // Rule at src/isa/aarch64/inst.isle line 1460. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRRExtend { @@ -1417,7 +1422,7 @@ pub fn constructor_alu_rr_extend_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1462. + // Rule at src/isa/aarch64/inst.isle line 1468. let expr0_0 = C::put_extended_in_reg(ctx, pattern2_0); let expr1_0 = C::get_extended_op(ctx, pattern2_0); let expr2_0 = constructor_alu_rrr_extend(ctx, pattern0_0, pattern1_0, expr0_0, &expr1_0)?; @@ -1436,7 +1441,7 @@ pub fn constructor_alu_rrrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1469. + // Rule at src/isa/aarch64/inst.isle line 1475. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::AluRRRR { @@ -1455,7 +1460,7 @@ pub fn constructor_alu_rrrr( pub fn constructor_bit_rr(ctx: &mut C, arg0: &BitOp, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1476. + // Rule at src/isa/aarch64/inst.isle line 1482. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::BitRR { @@ -1476,7 +1481,7 @@ pub fn constructor_add64_with_flags( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1483. + // Rule at src/isa/aarch64/inst.isle line 1489. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = ALUOp::AddS64; @@ -1498,7 +1503,7 @@ pub fn constructor_add64_with_flags( pub fn constructor_adc64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1490. + // Rule at src/isa/aarch64/inst.isle line 1496. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = ALUOp::Adc64; @@ -1524,7 +1529,7 @@ pub fn constructor_sub64_with_flags( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1497. + // Rule at src/isa/aarch64/inst.isle line 1503. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = ALUOp::SubS64; @@ -1550,7 +1555,7 @@ pub fn constructor_cmp64_imm( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1503. + // Rule at src/isa/aarch64/inst.isle line 1509. let expr0_0 = ALUOp::SubS64; let expr1_0 = C::writable_zero_reg(ctx); let expr2_0 = MInst::AluRRImm12 { @@ -1571,7 +1576,7 @@ pub fn constructor_cmp64_imm( pub fn constructor_sbc64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1509. + // Rule at src/isa/aarch64/inst.isle line 1515. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = ALUOp::Sbc64; @@ -1599,7 +1604,7 @@ pub fn constructor_vec_misc( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1516. + // Rule at src/isa/aarch64/inst.isle line 1522. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecMisc { @@ -1625,7 +1630,7 @@ pub fn constructor_vec_rrr_long( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1523. + // Rule at src/isa/aarch64/inst.isle line 1529. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRRLong { @@ -1654,7 +1659,7 @@ pub fn constructor_vec_rrrr_long( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/aarch64/inst.isle line 1533. + // Rule at src/isa/aarch64/inst.isle line 1539. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuMove128 { @@ -1684,7 +1689,7 @@ pub fn constructor_vec_rr_narrow( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1541. + // Rule at src/isa/aarch64/inst.isle line 1547. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRNarrow { @@ -1708,7 +1713,7 @@ pub fn constructor_vec_rr_long( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1548. + // Rule at src/isa/aarch64/inst.isle line 1554. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::VecRRLong { @@ -1730,7 +1735,7 @@ pub fn constructor_mov_to_fpu( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1555. + // Rule at src/isa/aarch64/inst.isle line 1561. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovToFpu { @@ -1755,7 +1760,7 @@ pub fn constructor_mov_to_vec( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1562. + // Rule at src/isa/aarch64/inst.isle line 1568. let expr0_0: Type = I8X16; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuMove128 { @@ -1784,7 +1789,7 @@ pub fn constructor_mov_from_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1570. + // Rule at src/isa/aarch64/inst.isle line 1576. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovFromVec { @@ -1810,7 +1815,7 @@ pub fn constructor_mov_from_vec_signed( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1577. + // Rule at src/isa/aarch64/inst.isle line 1583. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovFromVecSigned { @@ -1837,7 +1842,7 @@ pub fn constructor_extend( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1584. + // Rule at src/isa/aarch64/inst.isle line 1590. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::Extend { @@ -1856,7 +1861,7 @@ pub fn constructor_extend( pub fn constructor_load_acquire(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1591. + // Rule at src/isa/aarch64/inst.isle line 1597. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadAcquire { @@ -1877,7 +1882,7 @@ pub fn constructor_tst64_imm( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1601. + // Rule at src/isa/aarch64/inst.isle line 1607. let expr0_0 = ALUOp::AndS64; let expr1_0 = C::writable_zero_reg(ctx); let expr2_0 = MInst::AluRRImmLogic { @@ -1904,7 +1909,7 @@ pub fn constructor_csel( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1614. + // Rule at src/isa/aarch64/inst.isle line 1620. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::CSel { @@ -1927,14 +1932,14 @@ pub fn constructor_add(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1623. + // Rule at src/isa/aarch64/inst.isle line 1629. let expr0_0 = constructor_add64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1622. + // Rule at src/isa/aarch64/inst.isle line 1628. let expr0_0 = constructor_add32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -1945,7 +1950,7 @@ pub fn constructor_add(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg pub fn constructor_add32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1626. + // Rule at src/isa/aarch64/inst.isle line 1632. let expr0_0 = ALUOp::Add32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -1955,7 +1960,7 @@ pub fn constructor_add32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_add64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1629. + // Rule at src/isa/aarch64/inst.isle line 1635. let expr0_0 = ALUOp::Add64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -1972,14 +1977,14 @@ pub fn constructor_add_imm( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1633. + // Rule at src/isa/aarch64/inst.isle line 1639. let expr0_0 = constructor_add64_imm(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1632. + // Rule at src/isa/aarch64/inst.isle line 1638. let expr0_0 = constructor_add32_imm(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -1990,7 +1995,7 @@ pub fn constructor_add_imm( pub fn constructor_add32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1636. + // Rule at src/isa/aarch64/inst.isle line 1642. 
let expr0_0 = ALUOp::Add32; let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2000,7 +2005,7 @@ pub fn constructor_add32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> pub fn constructor_add64_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1639. + // Rule at src/isa/aarch64/inst.isle line 1645. let expr0_0 = ALUOp::Add64; let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2017,14 +2022,14 @@ pub fn constructor_add_extend( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1643. + // Rule at src/isa/aarch64/inst.isle line 1649. let expr0_0 = constructor_add64_extend(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1642. + // Rule at src/isa/aarch64/inst.isle line 1648. let expr0_0 = constructor_add32_extend(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -2039,7 +2044,7 @@ pub fn constructor_add32_extend( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1646. + // Rule at src/isa/aarch64/inst.isle line 1652. let expr0_0 = ALUOp::Add32; let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2053,7 +2058,7 @@ pub fn constructor_add64_extend( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1649. + // Rule at src/isa/aarch64/inst.isle line 1655. let expr0_0 = ALUOp::Add64; let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2072,7 +2077,7 @@ pub fn constructor_add_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1653. + // Rule at src/isa/aarch64/inst.isle line 1659. let expr0_0 = constructor_add64_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2080,7 +2085,7 @@ pub fn constructor_add_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1652. + // Rule at src/isa/aarch64/inst.isle line 1658. let expr0_0 = constructor_add32_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2097,7 +2102,7 @@ pub fn constructor_add32_shift( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1656. + // Rule at src/isa/aarch64/inst.isle line 1662. let expr0_0 = ALUOp::Add32; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2113,7 +2118,7 @@ pub fn constructor_add64_shift( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1659. + // Rule at src/isa/aarch64/inst.isle line 1665. let expr0_0 = ALUOp::Add64; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2129,7 +2134,7 @@ pub fn constructor_add_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1662. + // Rule at src/isa/aarch64/inst.isle line 1668. 
let expr0_0 = VecALUOp::Add; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2141,14 +2146,14 @@ pub fn constructor_sub(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1668. + // Rule at src/isa/aarch64/inst.isle line 1674. let expr0_0 = constructor_sub64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1667. + // Rule at src/isa/aarch64/inst.isle line 1673. let expr0_0 = constructor_sub32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -2159,7 +2164,7 @@ pub fn constructor_sub(ctx: &mut C, arg0: Type, arg1: Reg, arg2: Reg pub fn constructor_sub32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1671. + // Rule at src/isa/aarch64/inst.isle line 1677. let expr0_0 = ALUOp::Sub32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2169,7 +2174,7 @@ pub fn constructor_sub32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_sub64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1674. + // Rule at src/isa/aarch64/inst.isle line 1680. let expr0_0 = ALUOp::Sub64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2186,14 +2191,14 @@ pub fn constructor_sub_imm( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1678. + // Rule at src/isa/aarch64/inst.isle line 1684. let expr0_0 = constructor_sub64_imm(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1677. + // Rule at src/isa/aarch64/inst.isle line 1683. let expr0_0 = constructor_sub32_imm(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -2204,7 +2209,7 @@ pub fn constructor_sub_imm( pub fn constructor_sub32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1681. + // Rule at src/isa/aarch64/inst.isle line 1687. let expr0_0 = ALUOp::Sub32; let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2214,7 +2219,7 @@ pub fn constructor_sub32_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> pub fn constructor_sub64_imm(ctx: &mut C, arg0: Reg, arg1: Imm12) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1684. + // Rule at src/isa/aarch64/inst.isle line 1690. let expr0_0 = ALUOp::Sub64; let expr1_0 = constructor_alu_rr_imm12(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2231,14 +2236,14 @@ pub fn constructor_sub_extend( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1688. + // Rule at src/isa/aarch64/inst.isle line 1694. 
let expr0_0 = constructor_sub64_extend(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1687. + // Rule at src/isa/aarch64/inst.isle line 1693. let expr0_0 = constructor_sub32_extend(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -2253,7 +2258,7 @@ pub fn constructor_sub32_extend( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1691. + // Rule at src/isa/aarch64/inst.isle line 1697. let expr0_0 = ALUOp::Sub32; let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2267,7 +2272,7 @@ pub fn constructor_sub64_extend( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1694. + // Rule at src/isa/aarch64/inst.isle line 1700. let expr0_0 = ALUOp::Sub64; let expr1_0 = constructor_alu_rr_extend_reg(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2286,7 +2291,7 @@ pub fn constructor_sub_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1698. + // Rule at src/isa/aarch64/inst.isle line 1704. let expr0_0 = constructor_sub64_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2294,7 +2299,7 @@ pub fn constructor_sub_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1697. + // Rule at src/isa/aarch64/inst.isle line 1703. let expr0_0 = constructor_sub32_shift(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2311,7 +2316,7 @@ pub fn constructor_sub32_shift( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1701. + // Rule at src/isa/aarch64/inst.isle line 1707. let expr0_0 = ALUOp::Sub32; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2327,7 +2332,7 @@ pub fn constructor_sub64_shift( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1704. + // Rule at src/isa/aarch64/inst.isle line 1710. let expr0_0 = ALUOp::Sub64; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2343,7 +2348,7 @@ pub fn constructor_sub_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1707. + // Rule at src/isa/aarch64/inst.isle line 1713. let expr0_0 = VecALUOp::Sub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2362,7 +2367,7 @@ pub fn constructor_madd( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1713. + // Rule at src/isa/aarch64/inst.isle line 1719. let expr0_0 = constructor_madd64(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2370,7 +2375,7 @@ pub fn constructor_madd( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1712. + // Rule at src/isa/aarch64/inst.isle line 1718. 
let expr0_0 = constructor_madd32(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2382,7 +2387,7 @@ pub fn constructor_madd32(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1716. + // Rule at src/isa/aarch64/inst.isle line 1722. let expr0_0 = ALUOp3::MAdd32; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2393,7 +2398,7 @@ pub fn constructor_madd64(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1719. + // Rule at src/isa/aarch64/inst.isle line 1725. let expr0_0 = ALUOp3::MAdd64; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2404,7 +2409,7 @@ pub fn constructor_msub64(ctx: &mut C, arg0: Reg, arg1: Reg, arg2: R let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1724. + // Rule at src/isa/aarch64/inst.isle line 1730. let expr0_0 = ALUOp3::MSub64; let expr1_0 = constructor_alu_rrrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2420,7 +2425,7 @@ pub fn constructor_uqadd( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1728. + // Rule at src/isa/aarch64/inst.isle line 1734. let expr0_0 = VecALUOp::Uqadd; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2436,7 +2441,7 @@ pub fn constructor_sqadd( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1732. + // Rule at src/isa/aarch64/inst.isle line 1738. let expr0_0 = VecALUOp::Sqadd; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2452,7 +2457,7 @@ pub fn constructor_uqsub( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1736. + // Rule at src/isa/aarch64/inst.isle line 1742. let expr0_0 = VecALUOp::Uqsub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2468,7 +2473,7 @@ pub fn constructor_sqsub( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1740. + // Rule at src/isa/aarch64/inst.isle line 1746. let expr0_0 = VecALUOp::Sqsub; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2478,7 +2483,7 @@ pub fn constructor_sqsub( pub fn constructor_umulh(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1744. + // Rule at src/isa/aarch64/inst.isle line 1750. let expr0_0 = ALUOp::UMulH; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2488,7 +2493,7 @@ pub fn constructor_umulh(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_smulh(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1748. + // Rule at src/isa/aarch64/inst.isle line 1754. 
let expr0_0 = ALUOp::SMulH; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2504,7 +2509,7 @@ pub fn constructor_mul( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1752. + // Rule at src/isa/aarch64/inst.isle line 1758. let expr0_0 = VecALUOp::Mul; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2514,7 +2519,7 @@ pub fn constructor_mul( pub fn constructor_neg(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1756. + // Rule at src/isa/aarch64/inst.isle line 1762. let expr0_0 = VecMisc2::Neg; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2524,7 +2529,7 @@ pub fn constructor_neg(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> pub fn constructor_rev64(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1760. + // Rule at src/isa/aarch64/inst.isle line 1766. let expr0_0 = VecMisc2::Rev64; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2534,7 +2539,7 @@ pub fn constructor_rev64(ctx: &mut C, arg0: Reg, arg1: &VectorSize) pub fn constructor_xtn64(ctx: &mut C, arg0: Reg, arg1: bool) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1764. + // Rule at src/isa/aarch64/inst.isle line 1770. let expr0_0 = VecRRNarrowOp::Xtn64; let expr1_0 = constructor_vec_rr_narrow(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2550,7 +2555,7 @@ pub fn constructor_addp( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1768. + // Rule at src/isa/aarch64/inst.isle line 1774. let expr0_0 = VecALUOp::Addp; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2560,7 +2565,7 @@ pub fn constructor_addp( pub fn constructor_addv(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1772. + // Rule at src/isa/aarch64/inst.isle line 1778. let expr0_0 = VecLanesOp::Addv; let expr1_0 = constructor_vec_lanes(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2570,7 +2575,7 @@ pub fn constructor_addv(ctx: &mut C, arg0: Reg, arg1: &VectorSize) - pub fn constructor_shll32(ctx: &mut C, arg0: Reg, arg1: bool) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1776. + // Rule at src/isa/aarch64/inst.isle line 1782. let expr0_0 = VecRRLongOp::Shll32; let expr1_0 = constructor_vec_rr_long(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2588,7 +2593,7 @@ pub fn constructor_umlal32( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1780. + // Rule at src/isa/aarch64/inst.isle line 1786. let expr0_0 = VecRRRLongOp::Umlal32; let expr1_0 = constructor_vec_rrrr_long( ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0, pattern3_0, @@ -2606,7 +2611,7 @@ pub fn constructor_smull8( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1784. + // Rule at src/isa/aarch64/inst.isle line 1790. 
let expr0_0 = VecRRRLongOp::Smull8; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2622,7 +2627,7 @@ pub fn constructor_umull8( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1788. + // Rule at src/isa/aarch64/inst.isle line 1794. let expr0_0 = VecRRRLongOp::Umull8; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2638,7 +2643,7 @@ pub fn constructor_smull16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1792. + // Rule at src/isa/aarch64/inst.isle line 1798. let expr0_0 = VecRRRLongOp::Smull16; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2654,7 +2659,7 @@ pub fn constructor_umull16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1796. + // Rule at src/isa/aarch64/inst.isle line 1802. let expr0_0 = VecRRRLongOp::Umull16; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2670,7 +2675,7 @@ pub fn constructor_smull32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1800. + // Rule at src/isa/aarch64/inst.isle line 1806. let expr0_0 = VecRRRLongOp::Smull32; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2686,7 +2691,7 @@ pub fn constructor_umull32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1804. + // Rule at src/isa/aarch64/inst.isle line 1810. let expr0_0 = VecRRRLongOp::Umull32; let expr1_0 = constructor_vec_rrr_long(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2696,7 +2701,7 @@ pub fn constructor_umull32( pub fn constructor_asr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1808. + // Rule at src/isa/aarch64/inst.isle line 1814. let expr0_0 = ALUOp::Asr64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2706,7 +2711,7 @@ pub fn constructor_asr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_asr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1811. + // Rule at src/isa/aarch64/inst.isle line 1817. let expr0_0 = ALUOp::Asr64; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2716,7 +2721,7 @@ pub fn constructor_asr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) pub fn constructor_lsr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1815. + // Rule at src/isa/aarch64/inst.isle line 1821. let expr0_0 = ALUOp::Lsr32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2726,7 +2731,7 @@ pub fn constructor_lsr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_lsr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1818. 
+ // Rule at src/isa/aarch64/inst.isle line 1824. let expr0_0 = ALUOp::Lsr32; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2736,7 +2741,7 @@ pub fn constructor_lsr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) pub fn constructor_lsr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1821. + // Rule at src/isa/aarch64/inst.isle line 1827. let expr0_0 = ALUOp::Lsr64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2746,7 +2751,7 @@ pub fn constructor_lsr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_lsr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1824. + // Rule at src/isa/aarch64/inst.isle line 1830. let expr0_0 = ALUOp::Lsr64; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2756,7 +2761,7 @@ pub fn constructor_lsr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) pub fn constructor_lsl32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1828. + // Rule at src/isa/aarch64/inst.isle line 1834. let expr0_0 = ALUOp::Lsl32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2766,7 +2771,7 @@ pub fn constructor_lsl32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_lsl32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1831. + // Rule at src/isa/aarch64/inst.isle line 1837. let expr0_0 = ALUOp::Lsl32; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2776,7 +2781,7 @@ pub fn constructor_lsl32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) pub fn constructor_lsl64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1834. + // Rule at src/isa/aarch64/inst.isle line 1840. let expr0_0 = ALUOp::Lsl64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2786,7 +2791,7 @@ pub fn constructor_lsl64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_lsl64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1837. + // Rule at src/isa/aarch64/inst.isle line 1843. let expr0_0 = ALUOp::Lsl64; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2796,7 +2801,7 @@ pub fn constructor_lsl64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) pub fn constructor_udiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1841. + // Rule at src/isa/aarch64/inst.isle line 1847. let expr0_0 = ALUOp::UDiv64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2806,7 +2811,7 @@ pub fn constructor_udiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Opti pub fn constructor_sdiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1845. 
+ // Rule at src/isa/aarch64/inst.isle line 1851. let expr0_0 = ALUOp::SDiv64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2816,7 +2821,7 @@ pub fn constructor_sdiv64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Opti pub fn constructor_not(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1849. + // Rule at src/isa/aarch64/inst.isle line 1855. let expr0_0 = VecMisc2::Not; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2833,14 +2838,14 @@ pub fn constructor_orr_not( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1855. + // Rule at src/isa/aarch64/inst.isle line 1861. let expr0_0 = constructor_orr_not64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1854. + // Rule at src/isa/aarch64/inst.isle line 1860. let expr0_0 = constructor_orr_not32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -2851,7 +2856,7 @@ pub fn constructor_orr_not( pub fn constructor_orr_not32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1858. + // Rule at src/isa/aarch64/inst.isle line 1864. let expr0_0 = ALUOp::OrrNot32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2861,7 +2866,7 @@ pub fn constructor_orr_not32(ctx: &mut C, arg0: Reg, arg1: Reg) -> O pub fn constructor_orr_not64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1861. + // Rule at src/isa/aarch64/inst.isle line 1867. let expr0_0 = ALUOp::OrrNot64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2880,7 +2885,7 @@ pub fn constructor_orr_not_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1865. + // Rule at src/isa/aarch64/inst.isle line 1871. let expr0_0 = constructor_orr_not_shift64(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2888,7 +2893,7 @@ pub fn constructor_orr_not_shift( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 1864. + // Rule at src/isa/aarch64/inst.isle line 1870. let expr0_0 = constructor_orr_not_shift32(ctx, pattern2_0, pattern3_0, pattern4_0)?; return Some(expr0_0); } @@ -2905,7 +2910,7 @@ pub fn constructor_orr_not_shift32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1868. + // Rule at src/isa/aarch64/inst.isle line 1874. let expr0_0 = ALUOp::OrrNot32; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2921,7 +2926,7 @@ pub fn constructor_orr_not_shift64( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1871. + // Rule at src/isa/aarch64/inst.isle line 1877. 
let expr0_0 = ALUOp::OrrNot64; let expr1_0 = constructor_alu_rrr_shift(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2931,7 +2936,7 @@ pub fn constructor_orr_not_shift64( pub fn constructor_orr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1876. + // Rule at src/isa/aarch64/inst.isle line 1882. let expr0_0 = ALUOp::Orr32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2941,7 +2946,7 @@ pub fn constructor_orr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_orr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1879. + // Rule at src/isa/aarch64/inst.isle line 1885. let expr0_0 = ALUOp::Orr32; let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2951,7 +2956,7 @@ pub fn constructor_orr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) pub fn constructor_orr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1882. + // Rule at src/isa/aarch64/inst.isle line 1888. let expr0_0 = ALUOp::Orr64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2961,7 +2966,7 @@ pub fn constructor_orr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_orr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1885. + // Rule at src/isa/aarch64/inst.isle line 1891. let expr0_0 = ALUOp::Orr64; let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -2977,7 +2982,7 @@ pub fn constructor_orr_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1888. + // Rule at src/isa/aarch64/inst.isle line 1894. let expr0_0 = VecALUOp::Orr; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -2987,7 +2992,7 @@ pub fn constructor_orr_vec( pub fn constructor_and32_imm(ctx: &mut C, arg0: Reg, arg1: ImmLogic) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1893. + // Rule at src/isa/aarch64/inst.isle line 1899. let expr0_0 = ALUOp::And32; let expr1_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3003,7 +3008,7 @@ pub fn constructor_and_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1896. + // Rule at src/isa/aarch64/inst.isle line 1902. let expr0_0 = VecALUOp::And; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3019,7 +3024,7 @@ pub fn constructor_eor_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1900. + // Rule at src/isa/aarch64/inst.isle line 1906. let expr0_0 = VecALUOp::Eor; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3035,7 +3040,7 @@ pub fn constructor_bic_vec( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1904. 
+ // Rule at src/isa/aarch64/inst.isle line 1910. let expr0_0 = VecALUOp::Bic; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3051,7 +3056,7 @@ pub fn constructor_sshl( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1908. + // Rule at src/isa/aarch64/inst.isle line 1914. let expr0_0 = VecALUOp::Sshl; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3067,7 +3072,7 @@ pub fn constructor_ushl( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 1912. + // Rule at src/isa/aarch64/inst.isle line 1918. let expr0_0 = VecALUOp::Ushl; let expr1_0 = constructor_vec_rrr(ctx, &expr0_0, pattern0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3077,7 +3082,7 @@ pub fn constructor_ushl( pub fn constructor_rotr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1917. + // Rule at src/isa/aarch64/inst.isle line 1923. let expr0_0 = ALUOp::RotR32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3087,7 +3092,7 @@ pub fn constructor_rotr32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Opti pub fn constructor_rotr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1920. + // Rule at src/isa/aarch64/inst.isle line 1926. let expr0_0 = ALUOp::RotR32; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3097,7 +3102,7 @@ pub fn constructor_rotr32_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift pub fn constructor_rotr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1923. + // Rule at src/isa/aarch64/inst.isle line 1929. let expr0_0 = ALUOp::RotR64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3107,7 +3112,7 @@ pub fn constructor_rotr64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Opti pub fn constructor_rotr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1926. + // Rule at src/isa/aarch64/inst.isle line 1932. let expr0_0 = ALUOp::RotR64; let expr1_0 = constructor_alu_rr_imm_shift(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3116,7 +3121,7 @@ pub fn constructor_rotr64_imm(ctx: &mut C, arg0: Reg, arg1: ImmShift // Generated as internal constructor for term rbit32. pub fn constructor_rbit32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1931. + // Rule at src/isa/aarch64/inst.isle line 1937. let expr0_0 = BitOp::RBit32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3125,7 +3130,7 @@ pub fn constructor_rbit32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term rbit64. pub fn constructor_rbit64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1934. + // Rule at src/isa/aarch64/inst.isle line 1940. 
let expr0_0 = BitOp::RBit64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3134,7 +3139,7 @@ pub fn constructor_rbit64(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term clz32. pub fn constructor_clz32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1939. + // Rule at src/isa/aarch64/inst.isle line 1945. let expr0_0 = BitOp::Clz32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3143,7 +3148,7 @@ pub fn constructor_clz32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term clz64. pub fn constructor_clz64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1942. + // Rule at src/isa/aarch64/inst.isle line 1948. let expr0_0 = BitOp::Clz64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3152,7 +3157,7 @@ pub fn constructor_clz64(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term cls32. pub fn constructor_cls32(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1947. + // Rule at src/isa/aarch64/inst.isle line 1953. let expr0_0 = BitOp::Cls32; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3161,7 +3166,7 @@ pub fn constructor_cls32(ctx: &mut C, arg0: Reg) -> Option { // Generated as internal constructor for term cls64. pub fn constructor_cls64(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 1950. + // Rule at src/isa/aarch64/inst.isle line 1956. let expr0_0 = BitOp::Cls64; let expr1_0 = constructor_bit_rr(ctx, &expr0_0, pattern0_0)?; return Some(expr1_0); @@ -3171,7 +3176,7 @@ pub fn constructor_cls64(ctx: &mut C, arg0: Reg) -> Option { pub fn constructor_eon32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1955. + // Rule at src/isa/aarch64/inst.isle line 1961. let expr0_0 = ALUOp::EorNot32; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3181,7 +3186,7 @@ pub fn constructor_eon32(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_eon64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1958. + // Rule at src/isa/aarch64/inst.isle line 1964. let expr0_0 = ALUOp::EorNot64; let expr1_0 = constructor_alu_rrr(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3191,7 +3196,7 @@ pub fn constructor_eon64(ctx: &mut C, arg0: Reg, arg1: Reg) -> Optio pub fn constructor_vec_cnt(ctx: &mut C, arg0: Reg, arg1: &VectorSize) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/aarch64/inst.isle line 1963. + // Rule at src/isa/aarch64/inst.isle line 1969. let expr0_0 = VecMisc2::Cnt; let expr1_0 = constructor_vec_misc(ctx, &expr0_0, pattern0_0, pattern1_0)?; return Some(expr1_0); @@ -3208,25 +3213,25 @@ pub fn constructor_imm(ctx: &mut C, arg0: Type, arg1: u64) -> Option }; if let Some(pattern3_0) = closure3() { if let Some(pattern4_0) = C::imm_logic_from_u64(ctx, pattern2_0, pattern3_0) { - // Rule at src/isa/aarch64/inst.isle line 1978. + // Rule at src/isa/aarch64/inst.isle line 1984. 
let expr0_0 = C::zero_reg(ctx); let expr1_0 = constructor_orr64_imm(ctx, expr0_0, pattern4_0)?; return Some(expr1_0); } } if let Some(pattern3_0) = C::move_wide_const_from_u64(ctx, pattern2_0) { - // Rule at src/isa/aarch64/inst.isle line 1970. + // Rule at src/isa/aarch64/inst.isle line 1976. let expr0_0 = OperandSize::Size64; let expr1_0 = constructor_movz(ctx, pattern3_0, &expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::move_wide_const_from_negated_u64(ctx, pattern2_0) { - // Rule at src/isa/aarch64/inst.isle line 1974. + // Rule at src/isa/aarch64/inst.isle line 1980. let expr0_0 = OperandSize::Size64; let expr1_0 = constructor_movn(ctx, pattern3_0, &expr0_0)?; return Some(expr1_0); } - // Rule at src/isa/aarch64/inst.isle line 1985. + // Rule at src/isa/aarch64/inst.isle line 1991. let expr0_0 = C::load_constant64_full(ctx, pattern2_0); return Some(expr0_0); } @@ -3238,17 +3243,17 @@ pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I32 { - // Rule at src/isa/aarch64/inst.isle line 1996. + // Rule at src/isa/aarch64/inst.isle line 2002. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 1997. + // Rule at src/isa/aarch64/inst.isle line 2003. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 1992. + // Rule at src/isa/aarch64/inst.isle line 1998. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = true; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3264,17 +3269,17 @@ pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I32 { - // Rule at src/isa/aarch64/inst.isle line 2005. + // Rule at src/isa/aarch64/inst.isle line 2011. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2006. + // Rule at src/isa/aarch64/inst.isle line 2012. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 2001. + // Rule at src/isa/aarch64/inst.isle line 2007. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = false; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3290,12 +3295,12 @@ pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2014. + // Rule at src/isa/aarch64/inst.isle line 2020. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 2010. + // Rule at src/isa/aarch64/inst.isle line 2016. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = true; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3311,12 +3316,12 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if pattern1_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2022. + // Rule at src/isa/aarch64/inst.isle line 2028. 
let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) { - // Rule at src/isa/aarch64/inst.isle line 2018. + // Rule at src/isa/aarch64/inst.isle line 2024. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0: bool = false; let expr2_0 = C::ty_bits(ctx, pattern2_0); @@ -3330,7 +3335,7 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op // Generated as internal constructor for term trap_if_zero_divisor. pub fn constructor_trap_if_zero_divisor(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/aarch64/inst.isle line 2027. + // Rule at src/isa/aarch64/inst.isle line 2033. let expr0_0 = C::cond_br_zero(ctx, pattern0_0); let expr1_0 = C::trap_code_division_by_zero(ctx); let expr2_0 = MInst::TrapIf { @@ -3345,12 +3350,12 @@ pub fn constructor_trap_if_zero_divisor(ctx: &mut C, arg0: Reg) -> O pub fn constructor_size_from_ty(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2033. + // Rule at src/isa/aarch64/inst.isle line 2039. let expr0_0 = OperandSize::Size64; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - // Rule at src/isa/aarch64/inst.isle line 2032. + // Rule at src/isa/aarch64/inst.isle line 2038. let expr0_0 = OperandSize::Size32; return Some(expr0_0); } @@ -3367,7 +3372,7 @@ pub fn constructor_trap_if_div_overflow( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 2039. + // Rule at src/isa/aarch64/inst.isle line 2045. let expr0_0 = constructor_adds_op(ctx, pattern0_0)?; let expr1_0 = C::writable_zero_reg(ctx); let expr2_0: u8 = 1; @@ -3411,12 +3416,12 @@ pub fn constructor_trap_if_div_overflow( pub fn constructor_adds_op(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/aarch64/inst.isle line 2059. + // Rule at src/isa/aarch64/inst.isle line 2065. let expr0_0 = ALUOp::AddS64; return Some(expr0_0); } if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) { - // Rule at src/isa/aarch64/inst.isle line 2058. + // Rule at src/isa/aarch64/inst.isle line 2064. let expr0_0 = ALUOp::AddS32; return Some(expr0_0); } @@ -3450,7 +3455,7 @@ pub fn constructor_alu_rs_imm_logic_commutative( C::imm_logic_from_imm64(ctx, pattern5_1, pattern7_0) { let pattern9_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 2089. + // Rule at src/isa/aarch64/inst.isle line 2095. let expr0_0 = C::put_in_reg(ctx, pattern9_0); let expr1_0 = constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern8_0)?; @@ -3481,7 +3486,7 @@ pub fn constructor_alu_rs_imm_logic_commutative( C::lshl_from_imm64(ctx, pattern10_1, pattern12_0) { let pattern14_0 = arg3; - // Rule at src/isa/aarch64/inst.isle line 2095. + // Rule at src/isa/aarch64/inst.isle line 2101. let expr0_0 = C::put_in_reg(ctx, pattern14_0); let expr1_0 = C::put_in_reg(ctx, pattern7_0); let expr2_0 = constructor_alu_rrr_shift( @@ -3518,7 +3523,7 @@ pub fn constructor_alu_rs_imm_logic_commutative( if let Some(pattern9_0) = C::imm_logic_from_imm64(ctx, pattern6_1, pattern8_0) { - // Rule at src/isa/aarch64/inst.isle line 2087. + // Rule at src/isa/aarch64/inst.isle line 2093. 
let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern9_0)?; @@ -3548,7 +3553,7 @@ pub fn constructor_alu_rs_imm_logic_commutative( if let Some(pattern14_0) = C::lshl_from_imm64(ctx, pattern11_1, pattern13_0) { - // Rule at src/isa/aarch64/inst.isle line 2093. + // Rule at src/isa/aarch64/inst.isle line 2099. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern8_0); let expr2_0 = constructor_alu_rrr_shift( @@ -3569,7 +3574,7 @@ pub fn constructor_alu_rs_imm_logic_commutative( _ => {} } } - // Rule at src/isa/aarch64/inst.isle line 2083. + // Rule at src/isa/aarch64/inst.isle line 2089. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern3_0); let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, expr1_0)?; @@ -3603,7 +3608,7 @@ pub fn constructor_alu_rs_imm_logic( if let Some(pattern9_0) = C::imm_logic_from_imm64(ctx, pattern6_1, pattern8_0) { - // Rule at src/isa/aarch64/inst.isle line 2103. + // Rule at src/isa/aarch64/inst.isle line 2109. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_alu_rr_imm_logic(ctx, pattern0_0, expr0_0, pattern9_0)?; @@ -3633,7 +3638,7 @@ pub fn constructor_alu_rs_imm_logic( if let Some(pattern14_0) = C::lshl_from_imm64(ctx, pattern11_1, pattern13_0) { - // Rule at src/isa/aarch64/inst.isle line 2105. + // Rule at src/isa/aarch64/inst.isle line 2111. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern8_0); let expr2_0 = constructor_alu_rrr_shift( @@ -3654,7 +3659,7 @@ pub fn constructor_alu_rs_imm_logic( _ => {} } } - // Rule at src/isa/aarch64/inst.isle line 2101. + // Rule at src/isa/aarch64/inst.isle line 2107. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = C::put_in_reg(ctx, pattern3_0); let expr2_0 = constructor_alu_rrr(ctx, pattern0_0, expr0_0, expr1_0)?; @@ -3671,7 +3676,7 @@ pub fn constructor_i128_alu_bitop( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/aarch64/inst.isle line 2113. + // Rule at src/isa/aarch64/inst.isle line 2119. 
     let expr0_0 = C::put_in_regs(ctx, pattern1_0);
     let expr1_0: usize = 0;
     let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index 93598cf17c..78cf4f7f24 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -53,16 +53,11 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
     match op {
         Opcode::Iconst | Opcode::Bconst | Opcode::Null => implemented_in_isle(ctx),
 
-        Opcode::F32const => {
-            let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32);
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            lower_constant_f32(ctx, rd, value);
-        }
-        Opcode::F64const => {
-            let value = f64::from_bits(ctx.get_constant(insn).unwrap());
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            lower_constant_f64(ctx, rd, value);
-        }
+        Opcode::F32const | Opcode::F64const => unreachable!(
+            "Should never see constant ops at top level lowering entry point, \
+             as constants are rematerialized at use-sites"
+        ),
+
         Opcode::Iadd => implemented_in_isle(ctx),
         Opcode::Isub => implemented_in_isle(ctx),
         Opcode::UaddSat | Opcode::SaddSat | Opcode::UsubSat | Opcode::SsubSat => {
diff --git a/cranelift/filetests/filetests/isa/aarch64/constants.clif b/cranelift/filetests/filetests/isa/aarch64/constants.clif
index 5533462fe3..9357a75c76 100644
--- a/cranelift/filetests/filetests/isa/aarch64/constants.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/constants.clif
@@ -292,3 +292,124 @@ block0:
 ; Inst 1: ret
 ; }}
 
+function %f() -> f64 {
+block0:
+    v0 = f64const 0x1.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: fmov d0, #1
+; Inst 1: ret
+; }}
+
+function %f() -> f32 {
+block0:
+    v0 = f32const 0x5.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: fmov s0, #5
+; Inst 1: ret
+; }}
+
+function %f() -> f64 {
+block0:
+    v0 = f64const 0x32.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 3)
+; Inst 0: movz x0, #16457, LSL #48
+; Inst 1: fmov d0, x0
+; Inst 2: ret
+; }}
+
+function %f() -> f32 {
+block0:
+    v0 = f32const 0x32.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 3)
+; Inst 0: movz x0, #16968, LSL #16
+; Inst 1: fmov s0, w0
+; Inst 2: ret
+; }}
+
+function %f() -> f64 {
+block0:
+    v0 = f64const 0x0.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: movi v0.2s, #0
+; Inst 1: ret
+; }}
+
+function %f() -> f32 {
+block0:
+    v0 = f32const 0x0.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: movi v0.2s, #0
+; Inst 1: ret
+; }}
+
+function %f() -> f64 {
+block0:
+    v0 = f64const -0x10.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: fmov d0, #-16
+; Inst 1: ret
+; }}
+
+function %f() -> f32 {
+block0:
+    v0 = f32const -0x10.0
+    return v0
+}
+
+; VCode_ShowWithRRU {{
+; Entry block: 0
+; Block 0:
+; (original IR block: block0)
+; (instruction range: 0 .. 2)
+; Inst 0: fmov s0, #-16
+; Inst 1: ret
+; }}
diff --git a/cranelift/filetests/filetests/isa/aarch64/fcvt-small.clif b/cranelift/filetests/filetests/isa/aarch64/fcvt-small.clif
index 78389658c3..d35b6ef54d 100644
--- a/cranelift/filetests/filetests/isa/aarch64/fcvt-small.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/fcvt-small.clif
@@ -76,19 +76,18 @@ block0(v0: f32):
 ; Entry block: 0
 ; Block 0:
 ; (original IR block: block0)
-; (instruction range: 0 .. 12)
+; (instruction range: 0 .. 11)
 ; Inst 0: fcmp s0, s0
 ; Inst 1: b.vc 8 ; udf
-; Inst 2: movz x0, #49024, LSL #16
-; Inst 3: fmov d1, x0
-; Inst 4: fcmp s0, s1
-; Inst 5: b.gt 8 ; udf
-; Inst 6: movz x0, #17280, LSL #16
-; Inst 7: fmov d1, x0
-; Inst 8: fcmp s0, s1
-; Inst 9: b.mi 8 ; udf
-; Inst 10: fcvtzu w0, s0
-; Inst 11: ret
+; Inst 2: fmov s1, #-1
+; Inst 3: fcmp s0, s1
+; Inst 4: b.gt 8 ; udf
+; Inst 5: movz x0, #17280, LSL #16
+; Inst 6: fmov s1, w0
+; Inst 7: fcmp s0, s1
+; Inst 8: b.mi 8 ; udf
+; Inst 9: fcvtzu w0, s0
+; Inst 10: ret
 ; }}
 
 function u0:0(f64) -> i8 {
@@ -101,19 +100,18 @@ block0(v0: f64):
 ; Entry block: 0
 ; Block 0:
 ; (original IR block: block0)
-; (instruction range: 0 .. 12)
+; (instruction range: 0 .. 11)
 ; Inst 0: fcmp d0, d0
 ; Inst 1: b.vc 8 ; udf
-; Inst 2: movz x0, #49136, LSL #48
-; Inst 3: fmov d1, x0
-; Inst 4: fcmp d0, d1
-; Inst 5: b.gt 8 ; udf
-; Inst 6: movz x0, #16496, LSL #48
-; Inst 7: fmov d1, x0
-; Inst 8: fcmp d0, d1
-; Inst 9: b.mi 8 ; udf
-; Inst 10: fcvtzu w0, d0
-; Inst 11: ret
+; Inst 2: fmov d1, #-1
+; Inst 3: fcmp d0, d1
+; Inst 4: b.gt 8 ; udf
+; Inst 5: movz x0, #16496, LSL #48
+; Inst 6: fmov d1, x0
+; Inst 7: fcmp d0, d1
+; Inst 8: b.mi 8 ; udf
+; Inst 9: fcvtzu w0, d0
+; Inst 10: ret
 ; }}
 
 function u0:0(f32) -> i16 {
@@ -126,19 +124,18 @@ block0(v0: f32):
 ; Entry block: 0
 ; Block 0:
 ; (original IR block: block0)
-; (instruction range: 0 .. 12)
+; (instruction range: 0 .. 11)
 ; Inst 0: fcmp s0, s0
 ; Inst 1: b.vc 8 ; udf
-; Inst 2: movz x0, #49024, LSL #16
-; Inst 3: fmov d1, x0
-; Inst 4: fcmp s0, s1
-; Inst 5: b.gt 8 ; udf
-; Inst 6: movz x0, #18304, LSL #16
-; Inst 7: fmov d1, x0
-; Inst 8: fcmp s0, s1
-; Inst 9: b.mi 8 ; udf
-; Inst 10: fcvtzu w0, s0
-; Inst 11: ret
+; Inst 2: fmov s1, #-1
+; Inst 3: fcmp s0, s1
+; Inst 4: b.gt 8 ; udf
+; Inst 5: movz x0, #18304, LSL #16
+; Inst 6: fmov s1, w0
+; Inst 7: fcmp s0, s1
+; Inst 8: b.mi 8 ; udf
+; Inst 9: fcvtzu w0, s0
+; Inst 10: ret
 ; }}
 
 function u0:0(f64) -> i16 {
@@ -151,18 +148,17 @@ block0(v0: f64):
 ; Entry block: 0
 ; Block 0:
 ; (original IR block: block0)
-; (instruction range: 0 .. 12)
+; (instruction range: 0 ..
11) ; Inst 0: fcmp d0, d0 ; Inst 1: b.vc 8 ; udf -; Inst 2: movz x0, #49136, LSL #48 -; Inst 3: fmov d1, x0 -; Inst 4: fcmp d0, d1 -; Inst 5: b.gt 8 ; udf -; Inst 6: movz x0, #16624, LSL #48 -; Inst 7: fmov d1, x0 -; Inst 8: fcmp d0, d1 -; Inst 9: b.mi 8 ; udf -; Inst 10: fcvtzu w0, d0 -; Inst 11: ret +; Inst 2: fmov d1, #-1 +; Inst 3: fcmp d0, d1 +; Inst 4: b.gt 8 ; udf +; Inst 5: movz x0, #16624, LSL #48 +; Inst 6: fmov d1, x0 +; Inst 7: fcmp d0, d1 +; Inst 8: b.mi 8 ; udf +; Inst 9: fcvtzu w0, d0 +; Inst 10: ret ; }} diff --git a/cranelift/filetests/filetests/isa/aarch64/floating-point.clif b/cranelift/filetests/filetests/isa/aarch64/floating-point.clif index 0039c977f3..16b34f759f 100644 --- a/cranelift/filetests/filetests/isa/aarch64/floating-point.clif +++ b/cranelift/filetests/filetests/isa/aarch64/floating-point.clif @@ -494,19 +494,18 @@ block0(v0: f32): ; Entry block: 0 ; Block 0: ; (original IR block: block0) -; (instruction range: 0 .. 12) +; (instruction range: 0 .. 11) ; Inst 0: fcmp s0, s0 ; Inst 1: b.vc 8 ; udf -; Inst 2: movz x0, #49024, LSL #16 -; Inst 3: fmov d1, x0 -; Inst 4: fcmp s0, s1 -; Inst 5: b.gt 8 ; udf -; Inst 6: movz x0, #20352, LSL #16 -; Inst 7: fmov d1, x0 -; Inst 8: fcmp s0, s1 -; Inst 9: b.mi 8 ; udf -; Inst 10: fcvtzu w0, s0 -; Inst 11: ret +; Inst 2: fmov s1, #-1 +; Inst 3: fcmp s0, s1 +; Inst 4: b.gt 8 ; udf +; Inst 5: movz x0, #20352, LSL #16 +; Inst 6: fmov s1, w0 +; Inst 7: fcmp s0, s1 +; Inst 8: b.mi 8 ; udf +; Inst 9: fcvtzu w0, s0 +; Inst 10: ret ; }} function %f34(f32) -> i32 { @@ -523,11 +522,11 @@ block0(v0: f32): ; Inst 0: fcmp s0, s0 ; Inst 1: b.vc 8 ; udf ; Inst 2: movz x0, #52992, LSL #16 -; Inst 3: fmov d1, x0 +; Inst 3: fmov s1, w0 ; Inst 4: fcmp s0, s1 ; Inst 5: b.ge 8 ; udf ; Inst 6: movz x0, #20224, LSL #16 -; Inst 7: fmov d1, x0 +; Inst 7: fmov s1, w0 ; Inst 8: fcmp s0, s1 ; Inst 9: b.mi 8 ; udf ; Inst 10: fcvtzs w0, s0 @@ -544,19 +543,18 @@ block0(v0: f32): ; Entry block: 0 ; Block 0: ; (original IR block: block0) -; (instruction range: 0 .. 12) +; (instruction range: 0 .. 11) ; Inst 0: fcmp s0, s0 ; Inst 1: b.vc 8 ; udf -; Inst 2: movz x0, #49024, LSL #16 -; Inst 3: fmov d1, x0 -; Inst 4: fcmp s0, s1 -; Inst 5: b.gt 8 ; udf -; Inst 6: movz x0, #24448, LSL #16 -; Inst 7: fmov d1, x0 -; Inst 8: fcmp s0, s1 -; Inst 9: b.mi 8 ; udf -; Inst 10: fcvtzu x0, s0 -; Inst 11: ret +; Inst 2: fmov s1, #-1 +; Inst 3: fcmp s0, s1 +; Inst 4: b.gt 8 ; udf +; Inst 5: movz x0, #24448, LSL #16 +; Inst 6: fmov s1, w0 +; Inst 7: fcmp s0, s1 +; Inst 8: b.mi 8 ; udf +; Inst 9: fcvtzu x0, s0 +; Inst 10: ret ; }} function %f36(f32) -> i64 { @@ -573,11 +571,11 @@ block0(v0: f32): ; Inst 0: fcmp s0, s0 ; Inst 1: b.vc 8 ; udf ; Inst 2: movz x0, #57088, LSL #16 -; Inst 3: fmov d1, x0 +; Inst 3: fmov s1, w0 ; Inst 4: fcmp s0, s1 ; Inst 5: b.ge 8 ; udf ; Inst 6: movz x0, #24320, LSL #16 -; Inst 7: fmov d1, x0 +; Inst 7: fmov s1, w0 ; Inst 8: fcmp s0, s1 ; Inst 9: b.mi 8 ; udf ; Inst 10: fcvtzs x0, s0 @@ -594,19 +592,18 @@ block0(v0: f64): ; Entry block: 0 ; Block 0: ; (original IR block: block0) -; (instruction range: 0 .. 12) +; (instruction range: 0 .. 
11) ; Inst 0: fcmp d0, d0 ; Inst 1: b.vc 8 ; udf -; Inst 2: movz x0, #49136, LSL #48 -; Inst 3: fmov d1, x0 -; Inst 4: fcmp d0, d1 -; Inst 5: b.gt 8 ; udf -; Inst 6: movz x0, #16880, LSL #48 -; Inst 7: fmov d1, x0 -; Inst 8: fcmp d0, d1 -; Inst 9: b.mi 8 ; udf -; Inst 10: fcvtzu w0, d0 -; Inst 11: ret +; Inst 2: fmov d1, #-1 +; Inst 3: fcmp d0, d1 +; Inst 4: b.gt 8 ; udf +; Inst 5: movz x0, #16880, LSL #48 +; Inst 6: fmov d1, x0 +; Inst 7: fcmp d0, d1 +; Inst 8: b.mi 8 ; udf +; Inst 9: fcvtzu w0, d0 +; Inst 10: ret ; }} function %f38(f64) -> i32 { @@ -643,19 +640,18 @@ block0(v0: f64): ; Entry block: 0 ; Block 0: ; (original IR block: block0) -; (instruction range: 0 .. 12) +; (instruction range: 0 .. 11) ; Inst 0: fcmp d0, d0 ; Inst 1: b.vc 8 ; udf -; Inst 2: movz x0, #49136, LSL #48 -; Inst 3: fmov d1, x0 -; Inst 4: fcmp d0, d1 -; Inst 5: b.gt 8 ; udf -; Inst 6: movz x0, #17392, LSL #48 -; Inst 7: fmov d1, x0 -; Inst 8: fcmp d0, d1 -; Inst 9: b.mi 8 ; udf -; Inst 10: fcvtzu x0, d0 -; Inst 11: ret +; Inst 2: fmov d1, #-1 +; Inst 3: fcmp d0, d1 +; Inst 4: b.gt 8 ; udf +; Inst 5: movz x0, #17392, LSL #48 +; Inst 6: fmov d1, x0 +; Inst 7: fcmp d0, d1 +; Inst 8: b.mi 8 ; udf +; Inst 9: fcvtzu x0, d0 +; Inst 10: ret ; }} function %f40(f64) -> i64 { @@ -815,7 +811,7 @@ block0(v0: f32): ; (original IR block: block0) ; (instruction range: 0 .. 9) ; Inst 0: movz x0, #20352, LSL #16 -; Inst 1: fmov d1, x0 +; Inst 1: fmov s1, w0 ; Inst 2: fmin s2, s0, s1 ; Inst 3: movi v1.2s, #0 ; Inst 4: fmax s2, s2, s1 @@ -837,10 +833,10 @@ block0(v0: f32): ; (original IR block: block0) ; (instruction range: 0 .. 11) ; Inst 0: movz x0, #20224, LSL #16 -; Inst 1: fmov d1, x0 +; Inst 1: fmov s1, w0 ; Inst 2: fmin s1, s0, s1 ; Inst 3: movz x0, #52992, LSL #16 -; Inst 4: fmov d2, x0 +; Inst 4: fmov s2, w0 ; Inst 5: fmax s1, s1, s2 ; Inst 6: movi v2.2s, #0 ; Inst 7: fcmp s0, s0 @@ -861,7 +857,7 @@ block0(v0: f32): ; (original IR block: block0) ; (instruction range: 0 .. 9) ; Inst 0: movz x0, #24448, LSL #16 -; Inst 1: fmov d1, x0 +; Inst 1: fmov s1, w0 ; Inst 2: fmin s2, s0, s1 ; Inst 3: movi v1.2s, #0 ; Inst 4: fmax s2, s2, s1 @@ -883,10 +879,10 @@ block0(v0: f32): ; (original IR block: block0) ; (instruction range: 0 .. 11) ; Inst 0: movz x0, #24320, LSL #16 -; Inst 1: fmov d1, x0 +; Inst 1: fmov s1, w0 ; Inst 2: fmin s1, s0, s1 ; Inst 3: movz x0, #57088, LSL #16 -; Inst 4: fmov d2, x0 +; Inst 4: fmov s2, w0 ; Inst 5: fmax s1, s1, s2 ; Inst 6: movi v2.2s, #0 ; Inst 7: fcmp s0, s0