aarch64: Migrate uextend/sextend to ISLE

This commit migrates the sign/zero extension instructions from
`lower_inst.rs` to ISLE. There's actually a fair amount going on in this
migration since a few other pieces needed touching up along the way as
well:

* First is the actual migration of `uextend` and `sextend`. These
  instructions are relatively simple but end up having a number of special
  cases. I've attempted to replicate all the cases here, but a
  double-check would be appreciated.

* This commit fixes a few issues where sign/zero-extending the result of
  a vector extraction into i128 panics in the current backend (a scalar
  model of the i128 rules is sketched after this list).

* This commit adds exhaustive tests that extending the result of a vector
  extraction is a no-op with respect to the extraction itself, since the
  lane move already performs the extension.

* A bugfix to the ISLE glue was required to get this commit working:
  the case where the `RegMapper` implementation maps an input to an
  output (meaning ISLE passed an input through unmodified to the
  output) wasn't handled. That case requires a `mov` instruction to be
  generated, and this commit updates the glue to do so (see the sketch
  after this list). At the same time this commit updates the ISLE glue
  to share more infrastructure between x64 and aarch64, so both
  backends get this fix instead of just aarch64.
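
As a sanity check on the i128 cases above, here is a small scalar model
(illustration only, not backend code) of what the new rules compute for
the two halves of the result: zero-extension pairs the low 64 bits with a
constant 0, while sign-extension pairs them with an arithmetic shift right
by 63 of the low half.

```rust
/// Zero-extension to i128: the high half is simply 0.
fn uextend_to_i128_halves(lo: u64) -> (u64, u64) {
    (lo, 0)
}

/// Sign-extension to i128: `asr #63` broadcasts the sign bit of the low
/// half across the entire high half.
fn sextend_to_i128_halves(lo: u64) -> (u64, u64) {
    (lo, ((lo as i64) >> 63) as u64)
}

fn main() {
    assert_eq!(uextend_to_i128_halves(u64::MAX), (u64::MAX, 0));
    assert_eq!(sextend_to_i128_halves(u64::MAX), (u64::MAX, u64::MAX));
    assert_eq!(sextend_to_i128_halves(1), (1, 0));
}
```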

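For the glue fix in the last bullet, the following is a minimal,
self-contained sketch of the pass-through problem; `Reg`, `Inst`, and
`connect_output` are hypothetical stand-ins, not the real
`RegMapper`/renaming machinery in the shared machinst ISLE glue.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Reg(u32);

#[derive(Debug)]
enum Inst {
    /// A register-to-register move.
    Mov { dst: Reg, src: Reg },
    /// Some other lowered instruction defining `dst`.
    Op { dst: Reg },
}

/// Connect the register ISLE returned for a lowered value to the register
/// the rest of the pipeline expects it in. A freshly created temporary can
/// simply be renamed in the emitted instructions, but if ISLE passed an
/// *input* register straight through, nothing in `emitted` defines it, so
/// renaming would silently drop the copy; an explicit `mov` is needed.
fn connect_output(result: Reg, dst: Reg, result_is_temp: bool, emitted: &mut Vec<Inst>) {
    if result_is_temp {
        for inst in emitted.iter_mut() {
            match inst {
                Inst::Op { dst: d } | Inst::Mov { dst: d, .. } if *d == result => *d = dst,
                _ => {}
            }
        }
    } else {
        emitted.push(Inst::Mov { dst, src: result });
    }
}

fn main() {
    // Temporary case: the fresh temp %100 is renamed to the real output %2.
    let mut a = vec![Inst::Op { dst: Reg(100) }];
    connect_output(Reg(100), Reg(2), true, &mut a);
    // Pass-through case: input %1 flows unmodified to output %3, so a mov
    // is generated instead of a rename.
    let mut b: Vec<Inst> = Vec::new();
    connect_output(Reg(1), Reg(3), false, &mut b);
    println!("{:?}\n{:?}", a, b);
}
```
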
Overall I think the translation to ISLE was a net benefit for these
instructions. It's now relatively obvious what all the cases are, unlike
before, where it took a few reads of the code and some boolean switches
to figure out which path was taken for each flavor of input. There are
still possible improvements here: for example, the
`put_in_reg_{s,z}ext64` helpers don't use this logic, so technically they
could also pattern-match the "atomic loads and vector extractions
already do this for us" cases, but that's a possible future improvement
(and shouldn't be too hard with some ISLE refactoring).
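
To make "what all the cases are" concrete, here is a plain-Rust
restatement of the scalar case split that the rules encode, using made-up
names; it only illustrates the decision structure, the real rules live in
`lower.isle` and the generated matcher shown in the diff below.

```rust
#[derive(Debug)]
enum ExtendLowering {
    /// Source is an `extractlane`: the lane move itself performs the
    /// (un)signed extension, so no separate extend instruction is needed
    /// (64-bit lanes have a further special case, omitted here).
    VectorLaneMove { signed: bool },
    /// Zero-extension of a sinkable atomic load: sink the load and emit a
    /// `LoadAcquire`, which zero-extends as part of the load.
    SinkableAtomicLoad,
    /// General case: an explicit `Extend` instruction.
    ExplicitExtend { signed: bool },
}

fn classify_extend(signed: bool, from_extractlane: bool, from_atomic_load: bool) -> ExtendLowering {
    if from_extractlane {
        ExtendLowering::VectorLaneMove { signed }
    } else if !signed && from_atomic_load {
        ExtendLowering::SinkableAtomicLoad
    } else {
        ExtendLowering::ExplicitExtend { signed }
    }
}

fn main() {
    println!("{:?}", classify_extend(false, true, false)); // VectorLaneMove
    println!("{:?}", classify_extend(false, false, true)); // SinkableAtomicLoad
    println!("{:?}", classify_extend(true, false, false)); // ExplicitExtend
}
```
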
Author: Alex Crichton
Date:   2021-11-30 09:40:58 -08:00
parent 20e090b114
commit d89410ec4e
11 changed files with 937 additions and 391 deletions


@@ -10,7 +10,7 @@ use super::{
Inst as MInst, JTSequenceInfo, MachLabel, MoveWideConst, NarrowValueMode, Opcode, OperandSize,
PairAMode, Reg, ScalarSize, ShiftOpAndAmt, UImm5, VectorSize, NZCV,
};
use crate::isa::aarch64::settings as aarch64_settings;
use crate::isa::aarch64::settings::Flags;
use crate::machinst::isle::*;
use crate::{
binemit::CodeOffset,
@@ -21,9 +21,8 @@ use crate::{
isa::aarch64::inst::aarch64_map_regs,
isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
isa::unwind::UnwindInst,
machinst::{get_output_reg, ty_bits, InsnOutput, LowerCtx, RegRenamer},
machinst::{ty_bits, InsnOutput, LowerCtx},
};
use smallvec::SmallVec;
use std::boxed::Box;
use std::vec::Vec;
@@ -36,62 +35,21 @@ type BoxExternalName = Box<ExternalName>;
/// The main entry point for lowering with ISLE.
pub(crate) fn lower<C>(
lower_ctx: &mut C,
isa_flags: &aarch64_settings::Flags,
isa_flags: &Flags,
outputs: &[InsnOutput],
inst: Inst,
) -> Result<(), ()>
where
C: LowerCtx<I = MInst>,
{
// TODO: reuse the ISLE context across lowerings so we can reuse its
// internal heap allocations.
let mut isle_ctx = IsleContext::new(lower_ctx, isa_flags);
let temp_regs = generated_code::constructor_lower(&mut isle_ctx, inst).ok_or(())?;
let mut temp_regs = temp_regs.regs().iter();
#[cfg(debug_assertions)]
{
let all_dsts_len = outputs
.iter()
.map(|out| get_output_reg(isle_ctx.lower_ctx, *out).len())
.sum();
debug_assert_eq!(
temp_regs.len(),
all_dsts_len,
"the number of temporary registers and destination registers do \
not match ({} != {}); ensure the correct registers are being \
returned.",
temp_regs.len(),
all_dsts_len,
);
}
// The ISLE generated code emits its own registers to define the
// instruction's lowered values in. We rename those registers to the
// registers they were assigned when their value was used as an operand in
// earlier lowerings.
let mut renamer = RegRenamer::default();
for output in outputs {
let dsts = get_output_reg(isle_ctx.lower_ctx, *output);
for (temp, dst) in temp_regs.by_ref().zip(dsts.regs()) {
renamer.add_rename(*temp, dst.to_reg());
}
}
for mut inst in isle_ctx.into_emitted_insts() {
aarch64_map_regs(&mut inst, &renamer);
lower_ctx.emit(inst);
}
Ok(())
}
pub struct IsleContext<'a, C> {
lower_ctx: &'a mut C,
#[allow(dead_code)] // dead for now, but probably not for long
isa_flags: &'a aarch64_settings::Flags,
emitted_insts: SmallVec<[MInst; 6]>,
lower_common(
lower_ctx,
isa_flags,
outputs,
inst,
|cx, insn| generated_code::constructor_lower(cx, insn),
aarch64_map_regs,
)
}
pub struct ExtendedValue {
@@ -99,21 +57,12 @@ pub struct ExtendedValue {
extend: ExtendOp,
}
impl<'a, C> IsleContext<'a, C> {
pub fn new(lower_ctx: &'a mut C, isa_flags: &'a aarch64_settings::Flags) -> Self {
IsleContext {
lower_ctx,
isa_flags,
emitted_insts: SmallVec::new(),
}
}
pub fn into_emitted_insts(self) -> SmallVec<[MInst; 6]> {
self.emitted_insts
}
pub struct SinkableAtomicLoad {
atomic_load: Inst,
atomic_addr: Value,
}
impl<'a, C> generated_code::Context for IsleContext<'a, C>
impl<C> generated_code::Context for IsleContext<'_, C, Flags, 6>
where
C: LowerCtx<I = MInst>,
{
@@ -275,4 +224,23 @@ where
n => Some(n as u64),
}
}
fn sinkable_atomic_load(&mut self, val: Value) -> Option<SinkableAtomicLoad> {
let input = self.lower_ctx.get_value_as_source_or_const(val);
if let Some((atomic_load, 0)) = input.inst {
if self.lower_ctx.data(atomic_load).opcode() == Opcode::AtomicLoad {
let atomic_addr = self.lower_ctx.input_as_value(atomic_load, 0);
return Some(SinkableAtomicLoad {
atomic_load,
atomic_addr,
});
}
}
None
}
fn sink_atomic_load(&mut self, load: &SinkableAtomicLoad) -> Reg {
self.lower_ctx.sink_inst(load.atomic_load);
self.put_in_reg(load.atomic_addr)
}
}


@@ -1,4 +1,4 @@
src/clif.isle be1359b4b6b153f378517c1dd95cd80f4a6bed0c7b86eaba11c088fd71b7bfe80a3c868ace245b2da0bfbbd6ded262ea9576c8e0eeacbf89d03c34a17a709602
src/prelude.isle d3d2a6a42fb778231a4cdca30995324e1293a9ca8073c5a27a501535759eb51f84a6718322a93dfba4b66ee4f0c9afce7dcec0428516ef0c5bc96e8c8b76925d
src/isa/aarch64/inst.isle cec03d88680e8da01424eecc05ef73a48e4055d29fe841fceaa3e6ea4e7cb9abb887401bb5acb2e058c9fc993188640990b699e88272d62e243781b231cdfb0d
src/isa/aarch64/lower.isle e1ae53adc953ad395feeecd8edc8bcfd288491a4e4a71510e5f06e221f767518c6e060ff0d795c7c2510b7d898cc8b9bc0313906412e0176605c33427926f828
src/isa/aarch64/inst.isle 70d7b319ba0b28173d2ef1820bd0e9c4b8cf7a5ab34475a43f03bdc5a6b945a7faf40d7b539a12050ddd8ebc4c6b0fe82df5940eaf966420bb4d58e7420d4206
src/isa/aarch64/lower.isle dfc622b2fecea98079fff182ce3443ada5448256662f598ea009caed3d9bcf6b4816f736a8c7f70142467febf8fc97230c57287f06e80e6101f3b401208c599c


@@ -73,6 +73,8 @@ pub trait Context {
fn zero_reg(&mut self) -> Reg;
fn writable_zero_reg(&mut self) -> WritableReg;
fn load_constant64_full(&mut self, arg0: u64) -> Reg;
fn sinkable_atomic_load(&mut self, arg0: Value) -> Option<SinkableAtomicLoad>;
fn sink_atomic_load(&mut self, arg0: &SinkableAtomicLoad) -> Reg;
fn safe_divisor_from_imm64(&mut self, arg0: Imm64) -> Option<u64>;
}
@@ -1598,31 +1600,126 @@ pub fn constructor_vec_rr_long<C: Context>(
return Some(expr4_0);
}
// Generated as internal constructor for term mov_from_vec.
pub fn constructor_mov_from_vec<C: Context>(
ctx: &mut C,
arg0: Reg,
arg1: u8,
arg2: &VectorSize,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
// Rule at src/isa/aarch64/inst.isle line 1520.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::MovFromVec {
rd: expr1_0,
rn: pattern0_0,
idx: pattern1_0,
size: pattern2_0.clone(),
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term mov_from_vec_signed.
pub fn constructor_mov_from_vec_signed<C: Context>(
ctx: &mut C,
arg0: Reg,
arg1: u8,
arg2: &VectorSize,
arg3: &OperandSize,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1527.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::MovFromVecSigned {
rd: expr1_0,
rn: pattern0_0,
idx: pattern1_0,
size: pattern2_0.clone(),
scalar_size: pattern3_0.clone(),
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term extend.
pub fn constructor_extend<C: Context>(
ctx: &mut C,
arg0: Reg,
arg1: bool,
arg2: u8,
arg3: u8,
) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
let pattern3_0 = arg3;
// Rule at src/isa/aarch64/inst.isle line 1534.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::Extend {
rd: expr1_0,
rn: pattern0_0,
signed: pattern1_0,
from_bits: pattern2_0,
to_bits: pattern3_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term load_acquire.
pub fn constructor_load_acquire<C: Context>(ctx: &mut C, arg0: Type, arg1: Reg) -> Option<Reg> {
let pattern0_0 = arg0;
let pattern1_0 = arg1;
// Rule at src/isa/aarch64/inst.isle line 1541.
let expr0_0: Type = I64;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = MInst::LoadAcquire {
access_ty: pattern0_0,
rt: expr1_0,
rn: pattern1_0,
};
let expr3_0 = C::emit(ctx, &expr2_0);
let expr4_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr4_0);
}
// Generated as internal constructor for term imm.
pub fn constructor_imm<C: Context>(ctx: &mut C, arg0: Type, arg1: u64) -> Option<Reg> {
let pattern0_0 = arg0;
if let Some(pattern1_0) = C::integral_ty(ctx, pattern0_0) {
let pattern2_0 = arg1;
if let Some(pattern3_0) = C::imm_logic_from_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1531.
// Rule at src/isa/aarch64/inst.isle line 1559.
let expr0_0 = ALUOp::Orr64;
let expr1_0 = C::zero_reg(ctx);
let expr2_0 = constructor_alu_rr_imm_logic(ctx, &expr0_0, expr1_0, pattern3_0)?;
return Some(expr2_0);
}
if let Some(pattern3_0) = C::move_wide_const_from_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1523.
// Rule at src/isa/aarch64/inst.isle line 1551.
let expr0_0 = OperandSize::Size64;
let expr1_0 = constructor_movz(ctx, pattern3_0, &expr0_0)?;
return Some(expr1_0);
}
if let Some(pattern3_0) = C::move_wide_const_from_negated_u64(ctx, pattern2_0) {
// Rule at src/isa/aarch64/inst.isle line 1527.
// Rule at src/isa/aarch64/inst.isle line 1555.
let expr0_0 = OperandSize::Size64;
let expr1_0 = constructor_movn(ctx, pattern3_0, &expr0_0)?;
return Some(expr1_0);
}
// Rule at src/isa/aarch64/inst.isle line 1538.
// Rule at src/isa/aarch64/inst.isle line 1566.
let expr0_0 = C::load_constant64_full(ctx, pattern2_0);
return Some(expr0_0);
}
@@ -1634,28 +1731,18 @@ pub fn constructor_put_in_reg_sext64<C: Context>(ctx: &mut C, arg0: Value) -> Op
let pattern0_0 = arg0;
let pattern1_0 = C::value_type(ctx, pattern0_0);
if pattern1_0 == I64 {
// Rule at src/isa/aarch64/inst.isle line 1552.
// Rule at src/isa/aarch64/inst.isle line 1577.
let expr0_0 = C::put_in_reg(ctx, pattern0_0);
return Some(expr0_0);
}
if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) {
// Rule at src/isa/aarch64/inst.isle line 1545.
let expr0_0: Type = I32;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = C::put_in_reg(ctx, pattern0_0);
let expr3_0: bool = true;
let expr4_0 = C::ty_bits(ctx, pattern2_0);
let expr5_0: u8 = 64;
let expr6_0 = MInst::Extend {
rd: expr1_0,
rn: expr2_0,
signed: expr3_0,
from_bits: expr4_0,
to_bits: expr5_0,
};
let expr7_0 = C::emit(ctx, &expr6_0);
let expr8_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr8_0);
// Rule at src/isa/aarch64/inst.isle line 1573.
let expr0_0 = C::put_in_reg(ctx, pattern0_0);
let expr1_0: bool = true;
let expr2_0 = C::ty_bits(ctx, pattern2_0);
let expr3_0: u8 = 64;
let expr4_0 = constructor_extend(ctx, expr0_0, expr1_0, expr2_0, expr3_0)?;
return Some(expr4_0);
}
return None;
}
@@ -1665,28 +1752,18 @@ pub fn constructor_put_in_reg_zext64<C: Context>(ctx: &mut C, arg0: Value) -> Op
let pattern0_0 = arg0;
let pattern1_0 = C::value_type(ctx, pattern0_0);
if pattern1_0 == I64 {
// Rule at src/isa/aarch64/inst.isle line 1563.
// Rule at src/isa/aarch64/inst.isle line 1585.
let expr0_0 = C::put_in_reg(ctx, pattern0_0);
return Some(expr0_0);
}
if let Some(pattern2_0) = C::fits_in_32(ctx, pattern1_0) {
// Rule at src/isa/aarch64/inst.isle line 1556.
let expr0_0: Type = I32;
let expr1_0 = C::temp_writable_reg(ctx, expr0_0);
let expr2_0 = C::put_in_reg(ctx, pattern0_0);
let expr3_0: bool = false;
let expr4_0 = C::ty_bits(ctx, pattern2_0);
let expr5_0: u8 = 64;
let expr6_0 = MInst::Extend {
rd: expr1_0,
rn: expr2_0,
signed: expr3_0,
from_bits: expr4_0,
to_bits: expr5_0,
};
let expr7_0 = C::emit(ctx, &expr6_0);
let expr8_0 = C::writable_reg_to_reg(ctx, expr1_0);
return Some(expr8_0);
// Rule at src/isa/aarch64/inst.isle line 1581.
let expr0_0 = C::put_in_reg(ctx, pattern0_0);
let expr1_0: bool = false;
let expr2_0 = C::ty_bits(ctx, pattern2_0);
let expr3_0: u8 = 64;
let expr4_0 = constructor_extend(ctx, expr0_0, expr1_0, expr2_0, expr3_0)?;
return Some(expr4_0);
}
return None;
}
@@ -1694,7 +1771,7 @@ pub fn constructor_put_in_reg_zext64<C: Context>(ctx: &mut C, arg0: Value) -> Op
// Generated as internal constructor for term trap_if_zero_divisor.
pub fn constructor_trap_if_zero_divisor<C: Context>(ctx: &mut C, arg0: Reg) -> Option<Reg> {
let pattern0_0 = arg0;
// Rule at src/isa/aarch64/inst.isle line 1568.
// Rule at src/isa/aarch64/inst.isle line 1590.
let expr0_0 = C::cond_br_zero(ctx, pattern0_0);
let expr1_0 = C::trap_code_division_by_zero(ctx);
let expr2_0 = MInst::TrapIf {
@@ -1709,12 +1786,12 @@ pub fn constructor_trap_if_zero_divisor<C: Context>(ctx: &mut C, arg0: Reg) -> O
pub fn constructor_size_from_ty<C: Context>(ctx: &mut C, arg0: Type) -> Option<OperandSize> {
let pattern0_0 = arg0;
if pattern0_0 == I64 {
// Rule at src/isa/aarch64/inst.isle line 1574.
// Rule at src/isa/aarch64/inst.isle line 1596.
let expr0_0 = OperandSize::Size64;
return Some(expr0_0);
}
if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
// Rule at src/isa/aarch64/inst.isle line 1573.
// Rule at src/isa/aarch64/inst.isle line 1595.
let expr0_0 = OperandSize::Size32;
return Some(expr0_0);
}
@@ -1731,7 +1808,7 @@ pub fn constructor_trap_if_div_overflow<C: Context>(
let pattern0_0 = arg0;
let pattern1_0 = arg1;
let pattern2_0 = arg2;
// Rule at src/isa/aarch64/inst.isle line 1580.
// Rule at src/isa/aarch64/inst.isle line 1602.
let expr0_0 = constructor_adds_op(ctx, pattern0_0)?;
let expr1_0 = C::writable_zero_reg(ctx);
let expr2_0: u8 = 1;
@@ -1775,12 +1852,12 @@ pub fn constructor_trap_if_div_overflow<C: Context>(
pub fn constructor_adds_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp> {
let pattern0_0 = arg0;
if pattern0_0 == I64 {
// Rule at src/isa/aarch64/inst.isle line 1600.
// Rule at src/isa/aarch64/inst.isle line 1622.
let expr0_0 = ALUOp::AddS64;
return Some(expr0_0);
}
if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
// Rule at src/isa/aarch64/inst.isle line 1599.
// Rule at src/isa/aarch64/inst.isle line 1621.
let expr0_0 = ALUOp::AddS32;
return Some(expr0_0);
}
@@ -1826,78 +1903,196 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueReg
}
if pattern2_0 == I128 {
let pattern4_0 = C::inst_data(ctx, pattern0_0);
if let &InstructionData::Binary {
opcode: ref pattern5_0,
args: ref pattern5_1,
} = &pattern4_0
{
match &pattern5_0 {
&Opcode::Iadd => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 94.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = constructor_add64_with_flags(ctx, expr2_0, expr7_0)?;
let expr11_0 = constructor_adc64(ctx, expr4_0, expr9_0)?;
let expr12_0 = constructor_with_flags(ctx, &expr10_0, &expr11_0)?;
return Some(expr12_0);
match &pattern4_0 {
&InstructionData::Binary {
opcode: ref pattern5_0,
args: ref pattern5_1,
} => {
match &pattern5_0 {
&Opcode::Iadd => {
let (pattern7_0, pattern7_1) =
C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 94.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = constructor_add64_with_flags(ctx, expr2_0, expr7_0)?;
let expr11_0 = constructor_adc64(ctx, expr4_0, expr9_0)?;
let expr12_0 = constructor_with_flags(ctx, &expr10_0, &expr11_0)?;
return Some(expr12_0);
}
&Opcode::Isub => {
let (pattern7_0, pattern7_1) =
C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 145.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = constructor_sub64_with_flags(ctx, expr2_0, expr7_0)?;
let expr11_0 = constructor_sbc64(ctx, expr4_0, expr9_0)?;
let expr12_0 = constructor_with_flags(ctx, &expr10_0, &expr11_0)?;
return Some(expr12_0);
}
&Opcode::Imul => {
let (pattern7_0, pattern7_1) =
C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 200.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = ALUOp::UMulH;
let expr11_0 = constructor_alu_rrr(ctx, &expr10_0, expr2_0, expr7_0)?;
let expr12_0 = ALUOp3::MAdd64;
let expr13_0 =
constructor_alu_rrrr(ctx, &expr12_0, expr2_0, expr9_0, expr11_0)?;
let expr14_0 = ALUOp3::MAdd64;
let expr15_0 =
constructor_alu_rrrr(ctx, &expr14_0, expr4_0, expr7_0, expr13_0)?;
let expr16_0 = ALUOp3::MAdd64;
let expr17_0 = C::zero_reg(ctx);
let expr18_0 =
constructor_alu_rrrr(ctx, &expr16_0, expr2_0, expr7_0, expr17_0)?;
let expr19_0 = C::value_regs(ctx, expr18_0, expr15_0);
return Some(expr19_0);
}
_ => {}
}
&Opcode::Isub => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 145.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = constructor_sub64_with_flags(ctx, expr2_0, expr7_0)?;
let expr11_0 = constructor_sbc64(ctx, expr4_0, expr9_0)?;
let expr12_0 = constructor_with_flags(ctx, &expr10_0, &expr11_0)?;
return Some(expr12_0);
}
&Opcode::Imul => {
let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 200.
let expr0_0 = C::put_in_regs(ctx, pattern7_0);
let expr1_0: usize = 0;
let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
let expr3_0: usize = 1;
let expr4_0 = C::value_regs_get(ctx, expr0_0, expr3_0);
let expr5_0 = C::put_in_regs(ctx, pattern7_1);
let expr6_0: usize = 0;
let expr7_0 = C::value_regs_get(ctx, expr5_0, expr6_0);
let expr8_0: usize = 1;
let expr9_0 = C::value_regs_get(ctx, expr5_0, expr8_0);
let expr10_0 = ALUOp::UMulH;
let expr11_0 = constructor_alu_rrr(ctx, &expr10_0, expr2_0, expr7_0)?;
let expr12_0 = ALUOp3::MAdd64;
let expr13_0 =
constructor_alu_rrrr(ctx, &expr12_0, expr2_0, expr9_0, expr11_0)?;
let expr14_0 = ALUOp3::MAdd64;
let expr15_0 =
constructor_alu_rrrr(ctx, &expr14_0, expr4_0, expr7_0, expr13_0)?;
let expr16_0 = ALUOp3::MAdd64;
let expr17_0 = C::zero_reg(ctx);
let expr18_0 =
constructor_alu_rrrr(ctx, &expr16_0, expr2_0, expr7_0, expr17_0)?;
let expr19_0 = C::value_regs(ctx, expr18_0, expr15_0);
return Some(expr19_0);
}
_ => {}
}
&InstructionData::Unary {
opcode: ref pattern5_0,
arg: pattern5_1,
} => {
match &pattern5_0 {
&Opcode::Uextend => {
if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) {
let pattern8_0 = C::inst_data(ctx, pattern7_0);
if let &InstructionData::BinaryImm8 {
opcode: ref pattern9_0,
arg: pattern9_1,
imm: pattern9_2,
} = &pattern8_0
{
if let &Opcode::Extractlane = &pattern9_0 {
let pattern11_0 = C::value_type(ctx, pattern9_1);
let pattern12_0 = C::u8_from_uimm8(ctx, pattern9_2);
// Rule at src/isa/aarch64/lower.isle line 533.
let expr0_0 = C::put_in_reg(ctx, pattern9_1);
let expr1_0 = constructor_vector_size(ctx, pattern11_0)?;
let expr2_0 = constructor_mov_from_vec(
ctx,
expr0_0,
pattern12_0,
&expr1_0,
)?;
let expr3_0: Type = I64;
let expr4_0: u64 = 0;
let expr5_0 = constructor_imm(ctx, expr3_0, expr4_0)?;
let expr6_0 = C::value_regs(ctx, expr2_0, expr5_0);
return Some(expr6_0);
}
}
}
// Rule at src/isa/aarch64/lower.isle line 528.
let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern5_1)?;
let expr1_0: Type = I64;
let expr2_0: u64 = 0;
let expr3_0 = constructor_imm(ctx, expr1_0, expr2_0)?;
let expr4_0 = C::value_regs(ctx, expr0_0, expr3_0);
return Some(expr4_0);
}
&Opcode::Sextend => {
if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) {
let pattern8_0 = C::inst_data(ctx, pattern7_0);
if let &InstructionData::BinaryImm8 {
opcode: ref pattern9_0,
arg: pattern9_1,
imm: pattern9_2,
} = &pattern8_0
{
if let &Opcode::Extractlane = &pattern9_0 {
let pattern11_0 = C::value_type(ctx, pattern9_1);
if pattern11_0 == I64X2 {
let pattern13_0 = C::u8_from_uimm8(ctx, pattern9_2);
// Rule at src/isa/aarch64/lower.isle line 581.
let expr0_0 = C::put_in_reg(ctx, pattern9_1);
let expr1_0 = VectorSize::Size64x2;
let expr2_0 = constructor_mov_from_vec(
ctx,
expr0_0,
pattern13_0,
&expr1_0,
)?;
let expr3_0 = ALUOp::Asr64;
let expr4_0: u8 = 63;
let expr5_0 = C::imm_shift_from_u8(ctx, expr4_0);
let expr6_0 = constructor_alu_rr_imm_shift(
ctx, &expr3_0, expr2_0, expr5_0,
)?;
let expr7_0 = C::value_regs(ctx, expr2_0, expr6_0);
return Some(expr7_0);
}
if let Some(()) = C::not_i64x2(ctx, pattern11_0) {
let pattern13_0 = C::u8_from_uimm8(ctx, pattern9_2);
// Rule at src/isa/aarch64/lower.isle line 568.
let expr0_0 = C::put_in_reg(ctx, pattern9_1);
let expr1_0 =
constructor_vector_size(ctx, pattern11_0)?;
let expr2_0: Type = I64;
let expr3_0 = constructor_size_from_ty(ctx, expr2_0)?;
let expr4_0 = constructor_mov_from_vec_signed(
ctx,
expr0_0,
pattern13_0,
&expr1_0,
&expr3_0,
)?;
let expr5_0 = ALUOp::Asr64;
let expr6_0: u8 = 63;
let expr7_0 = C::imm_shift_from_u8(ctx, expr6_0);
let expr8_0 = constructor_alu_rr_imm_shift(
ctx, &expr5_0, expr4_0, expr7_0,
)?;
let expr9_0 = C::value_regs(ctx, expr4_0, expr8_0);
return Some(expr9_0);
}
}
}
}
// Rule at src/isa/aarch64/lower.isle line 556.
let expr0_0 = constructor_put_in_reg_sext64(ctx, pattern5_1)?;
let expr1_0 = ALUOp::Asr64;
let expr2_0: u8 = 63;
let expr3_0 = C::imm_shift_from_u8(ctx, expr2_0);
let expr4_0 =
constructor_alu_rr_imm_shift(ctx, &expr1_0, expr0_0, expr3_0)?;
let expr5_0 = C::value_regs(ctx, expr0_0, expr4_0);
return Some(expr5_0);
}
_ => {}
}
}
_ => {}
}
}
if pattern2_0 == I16X8 {
@@ -3035,14 +3230,100 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueReg
opcode: ref pattern5_0,
arg: pattern5_1,
} => {
if let &Opcode::Ineg = &pattern5_0 {
// Rule at src/isa/aarch64/lower.isle line 186.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::zero_reg(ctx);
let expr2_0 = C::put_in_reg(ctx, pattern5_1);
let expr3_0 = constructor_alu_rrr(ctx, &expr0_0, expr1_0, expr2_0)?;
let expr4_0 = C::value_reg(ctx, expr3_0);
return Some(expr4_0);
match &pattern5_0 {
&Opcode::Ineg => {
// Rule at src/isa/aarch64/lower.isle line 186.
let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
let expr1_0 = C::zero_reg(ctx);
let expr2_0 = C::put_in_reg(ctx, pattern5_1);
let expr3_0 = constructor_alu_rrr(ctx, &expr0_0, expr1_0, expr2_0)?;
let expr4_0 = C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
&Opcode::Uextend => {
if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) {
let pattern8_0 = C::inst_data(ctx, pattern7_0);
if let &InstructionData::BinaryImm8 {
opcode: ref pattern9_0,
arg: pattern9_1,
imm: pattern9_2,
} = &pattern8_0
{
if let &Opcode::Extractlane = &pattern9_0 {
let pattern11_0 = C::value_type(ctx, pattern9_1);
let pattern12_0 = C::u8_from_uimm8(ctx, pattern9_2);
// Rule at src/isa/aarch64/lower.isle line 515.
let expr0_0 = C::put_in_reg(ctx, pattern9_1);
let expr1_0 = constructor_vector_size(ctx, pattern11_0)?;
let expr2_0 = constructor_mov_from_vec(
ctx,
expr0_0,
pattern12_0,
&expr1_0,
)?;
let expr3_0 = C::value_reg(ctx, expr2_0);
return Some(expr3_0);
}
}
}
let pattern7_0 = C::value_type(ctx, pattern5_1);
if let Some(pattern8_0) = C::sinkable_atomic_load(ctx, pattern5_1) {
// Rule at src/isa/aarch64/lower.isle line 522.
let expr0_0 = C::sink_atomic_load(ctx, &pattern8_0);
let expr1_0 = constructor_load_acquire(ctx, pattern7_0, expr0_0)?;
let expr2_0 = C::value_reg(ctx, expr1_0);
return Some(expr2_0);
}
// Rule at src/isa/aarch64/lower.isle line 510.
let expr0_0 = C::put_in_reg(ctx, pattern5_1);
let expr1_0: bool = false;
let expr2_0 = C::ty_bits(ctx, pattern7_0);
let expr3_0 = C::ty_bits(ctx, pattern3_0);
let expr4_0 =
constructor_extend(ctx, expr0_0, expr1_0, expr2_0, expr3_0)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
&Opcode::Sextend => {
if let Some(pattern7_0) = C::def_inst(ctx, pattern5_1) {
let pattern8_0 = C::inst_data(ctx, pattern7_0);
if let &InstructionData::BinaryImm8 {
opcode: ref pattern9_0,
arg: pattern9_1,
imm: pattern9_2,
} = &pattern8_0
{
if let &Opcode::Extractlane = &pattern9_0 {
let pattern11_0 = C::value_type(ctx, pattern9_1);
let pattern12_0 = C::u8_from_uimm8(ctx, pattern9_2);
// Rule at src/isa/aarch64/lower.isle line 547.
let expr0_0 = C::put_in_reg(ctx, pattern9_1);
let expr1_0 = constructor_vector_size(ctx, pattern11_0)?;
let expr2_0 = constructor_size_from_ty(ctx, pattern3_0)?;
let expr3_0 = constructor_mov_from_vec_signed(
ctx,
expr0_0,
pattern12_0,
&expr1_0,
&expr2_0,
)?;
let expr4_0 = C::value_reg(ctx, expr3_0);
return Some(expr4_0);
}
}
}
let pattern7_0 = C::value_type(ctx, pattern5_1);
// Rule at src/isa/aarch64/lower.isle line 542.
let expr0_0 = C::put_in_reg(ctx, pattern5_1);
let expr1_0: bool = true;
let expr2_0 = C::ty_bits(ctx, pattern7_0);
let expr3_0 = C::ty_bits(ctx, pattern3_0);
let expr4_0 =
constructor_extend(ctx, expr0_0, expr1_0, expr2_0, expr3_0)?;
let expr5_0 = C::value_reg(ctx, expr4_0);
return Some(expr5_0);
}
_ => {}
}
}
_ => {}