Refactor AArch64 ABI support to extract common bits for shared impl with x64.

We have observed that the ABI implementations for AArch64 and x64 are
very similar; in fact, x64's implementation started as a modified copy
of AArch64's implementation. This is an artifact of both a similar ABI
(both machines pass args and return values in registers first, then the
stack, and both machines give considerable freedom with stack-frame
layout) and a too-low-level ABI abstraction in the existing design. For
machines that fit the mainstream or most common ABI-design idioms, we
should be able to do much better.

This commit factors AArch64 into machine-specific and
machine-independent parts, but does not yet modify x64; that will come
next.

This should be completely neutral with respect to compile time and
generated code performance.
This commit is contained in:
Chris Fallin
2020-08-12 20:31:35 -07:00
parent 38ef98700f
commit 5cf3fba3da
10 changed files with 2039 additions and 1693 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -7,8 +7,7 @@ use crate::ir;
use crate::ir::types::{F32X2, F32X4, F64X2, I16X4, I16X8, I32X2, I32X4, I64X2, I8X16, I8X8};
use crate::ir::Type;
use crate::isa::aarch64::inst::*;
use crate::isa::aarch64::lower::ty_bits;
use crate::machinst::MachLabel;
use crate::machinst::{ty_bits, MachLabel};
use regalloc::{RealRegUniverse, Reg, Writable};
@@ -119,9 +118,9 @@ pub enum MemLabel {
PCRel(i32),
}
/// A memory argument to load/store, encapsulating the possible addressing modes.
/// An addressing mode specified for a load/store operation.
#[derive(Clone, Debug)]
pub enum MemArg {
pub enum AMode {
//
// Real ARM64 addressing modes:
//
@@ -183,39 +182,39 @@ pub enum MemArg {
NominalSPOffset(i64, Type),
}
impl MemArg {
impl AMode {
/// Memory reference using an address in a register.
pub fn reg(reg: Reg) -> MemArg {
pub fn reg(reg: Reg) -> AMode {
// Use UnsignedOffset rather than Unscaled to use ldr rather than ldur.
// This also does not use PostIndexed / PreIndexed as they update the register.
MemArg::UnsignedOffset(reg, UImm12Scaled::zero(I64))
AMode::UnsignedOffset(reg, UImm12Scaled::zero(I64))
}
/// Memory reference using the sum of two registers as an address.
pub fn reg_plus_reg(reg1: Reg, reg2: Reg) -> MemArg {
MemArg::RegReg(reg1, reg2)
pub fn reg_plus_reg(reg1: Reg, reg2: Reg) -> AMode {
AMode::RegReg(reg1, reg2)
}
/// Memory reference using `reg1 + sizeof(ty) * reg2` as an address.
pub fn reg_plus_reg_scaled(reg1: Reg, reg2: Reg, ty: Type) -> MemArg {
MemArg::RegScaled(reg1, reg2, ty)
pub fn reg_plus_reg_scaled(reg1: Reg, reg2: Reg, ty: Type) -> AMode {
AMode::RegScaled(reg1, reg2, ty)
}
/// Memory reference using `reg1 + sizeof(ty) * reg2` as an address, with `reg2` sign- or
/// zero-extended as per `op`.
pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, ty: Type, op: ExtendOp) -> MemArg {
MemArg::RegScaledExtended(reg1, reg2, ty, op)
pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, ty: Type, op: ExtendOp) -> AMode {
AMode::RegScaledExtended(reg1, reg2, ty, op)
}
/// Memory reference to a label: a global function or value, or data in the constant pool.
pub fn label(label: MemLabel) -> MemArg {
MemArg::Label(label)
pub fn label(label: MemLabel) -> AMode {
AMode::Label(label)
}
}
/// A memory argument to a load/store-pair.
#[derive(Clone, Debug)]
pub enum PairMemArg {
pub enum PairAMode {
SignedOffset(Reg, SImm7Scaled),
PreIndexed(Writable<Reg>, SImm7Scaled),
PostIndexed(Writable<Reg>, SImm7Scaled),
@@ -381,27 +380,27 @@ fn shift_for_type(ty: Type) -> usize {
}
}
impl ShowWithRRU for MemArg {
impl ShowWithRRU for AMode {
fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&MemArg::Unscaled(reg, simm9) => {
&AMode::Unscaled(reg, simm9) => {
if simm9.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), simm9.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&MemArg::UnsignedOffset(reg, uimm12) => {
&AMode::UnsignedOffset(reg, uimm12) => {
if uimm12.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), uimm12.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&MemArg::RegReg(r1, r2) => {
&AMode::RegReg(r1, r2) => {
format!("[{}, {}]", r1.show_rru(mb_rru), r2.show_rru(mb_rru),)
}
&MemArg::RegScaled(r1, r2, ty) => {
&AMode::RegScaled(r1, r2, ty) => {
let shift = shift_for_type(ty);
format!(
"[{}, {}, LSL #{}]",
@@ -410,7 +409,7 @@ impl ShowWithRRU for MemArg {
shift,
)
}
&MemArg::RegScaledExtended(r1, r2, ty, op) => {
&AMode::RegScaledExtended(r1, r2, ty, op) => {
let shift = shift_for_type(ty);
let size = match op {
ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
@@ -425,7 +424,7 @@ impl ShowWithRRU for MemArg {
shift
)
}
&MemArg::RegExtended(r1, r2, op) => {
&AMode::RegExtended(r1, r2, op) => {
let size = match op {
ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
_ => OperandSize::Size64,
@@ -438,44 +437,44 @@ impl ShowWithRRU for MemArg {
op,
)
}
&MemArg::Label(ref label) => label.show_rru(mb_rru),
&MemArg::PreIndexed(r, simm9) => format!(
&AMode::Label(ref label) => label.show_rru(mb_rru),
&AMode::PreIndexed(r, simm9) => format!(
"[{}, {}]!",
r.to_reg().show_rru(mb_rru),
simm9.show_rru(mb_rru)
),
&MemArg::PostIndexed(r, simm9) => format!(
&AMode::PostIndexed(r, simm9) => format!(
"[{}], {}",
r.to_reg().show_rru(mb_rru),
simm9.show_rru(mb_rru)
),
// Eliminated by `mem_finalize()`.
&MemArg::SPOffset(..)
| &MemArg::FPOffset(..)
| &MemArg::NominalSPOffset(..)
| &MemArg::RegOffset(..) => {
&AMode::SPOffset(..)
| &AMode::FPOffset(..)
| &AMode::NominalSPOffset(..)
| &AMode::RegOffset(..) => {
panic!("Unexpected pseudo mem-arg mode (stack-offset or generic reg-offset)!")
}
}
}
}
impl ShowWithRRU for PairMemArg {
impl ShowWithRRU for PairAMode {
fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&PairMemArg::SignedOffset(reg, simm7) => {
&PairAMode::SignedOffset(reg, simm7) => {
if simm7.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), simm7.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&PairMemArg::PreIndexed(reg, simm7) => format!(
&PairAMode::PreIndexed(reg, simm7) => format!(
"[{}, {}]!",
reg.to_reg().show_rru(mb_rru),
simm7.show_rru(mb_rru)
),
&PairMemArg::PostIndexed(reg, simm7) => format!(
&PairAMode::PostIndexed(reg, simm7) => format!(
"[{}], {}",
reg.to_reg().show_rru(mb_rru),
simm7.show_rru(mb_rru)

View File

@@ -5,7 +5,7 @@ use crate::ir::constant::ConstantData;
use crate::ir::types::*;
use crate::ir::TrapCode;
use crate::isa::aarch64::inst::*;
use crate::isa::aarch64::lower::ty_bits;
use crate::machinst::ty_bits;
use regalloc::{Reg, RegClass, Writable};
@@ -26,22 +26,22 @@ pub fn memlabel_finalize(_insn_off: CodeOffset, label: &MemLabel) -> i32 {
/// of this amode.
pub fn mem_finalize(
insn_off: CodeOffset,
mem: &MemArg,
mem: &AMode,
state: &EmitState,
) -> (SmallVec<[Inst; 4]>, MemArg) {
) -> (SmallVec<[Inst; 4]>, AMode) {
match mem {
&MemArg::RegOffset(_, off, ty)
| &MemArg::SPOffset(off, ty)
| &MemArg::FPOffset(off, ty)
| &MemArg::NominalSPOffset(off, ty) => {
&AMode::RegOffset(_, off, ty)
| &AMode::SPOffset(off, ty)
| &AMode::FPOffset(off, ty)
| &AMode::NominalSPOffset(off, ty) => {
let basereg = match mem {
&MemArg::RegOffset(reg, _, _) => reg,
&MemArg::SPOffset(..) | &MemArg::NominalSPOffset(..) => stack_reg(),
&MemArg::FPOffset(..) => fp_reg(),
&AMode::RegOffset(reg, _, _) => reg,
&AMode::SPOffset(..) | &AMode::NominalSPOffset(..) => stack_reg(),
&AMode::FPOffset(..) => fp_reg(),
_ => unreachable!(),
};
let adj = match mem {
&MemArg::NominalSPOffset(..) => {
&AMode::NominalSPOffset(..) => {
debug!(
"mem_finalize: nominal SP offset {} + adj {} -> {}",
off,
@@ -55,10 +55,10 @@ pub fn mem_finalize(
let off = off + adj;
if let Some(simm9) = SImm9::maybe_from_i64(off) {
let mem = MemArg::Unscaled(basereg, simm9);
let mem = AMode::Unscaled(basereg, simm9);
(smallvec![], mem)
} else if let Some(uimm12s) = UImm12Scaled::maybe_from_i64(off, ty) {
let mem = MemArg::UnsignedOffset(basereg, uimm12s);
let mem = AMode::UnsignedOffset(basereg, uimm12s);
(smallvec![], mem)
} else {
let tmp = writable_spilltmp_reg();
@@ -75,13 +75,13 @@ pub fn mem_finalize(
extendop: ExtendOp::UXTX,
};
const_insts.push(add_inst);
(const_insts, MemArg::reg(tmp.to_reg()))
(const_insts, AMode::reg(tmp.to_reg()))
}
}
&MemArg::Label(ref label) => {
&AMode::Label(ref label) => {
let off = memlabel_finalize(insn_off, label);
(smallvec![], MemArg::Label(MemLabel::PCRel(off)))
(smallvec![], AMode::Label(MemLabel::PCRel(off)))
}
_ => (smallvec![], mem.clone()),
@@ -226,7 +226,7 @@ fn enc_ldst_reg(
Some(ExtendOp::SXTW) => 0b110,
Some(ExtendOp::SXTX) => 0b111,
None => 0b011, // LSL
_ => panic!("bad extend mode for ld/st MemArg"),
_ => panic!("bad extend mode for ld/st AMode"),
};
(op_31_22 << 22)
| (1 << 21)
@@ -780,32 +780,32 @@ impl MachInstEmit for Inst {
}
match &mem {
&MemArg::Unscaled(reg, simm9) => {
&AMode::Unscaled(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
}
&MemArg::UnsignedOffset(reg, uimm12scaled) => {
&AMode::UnsignedOffset(reg, uimm12scaled) => {
if uimm12scaled.value() != 0 {
assert_eq!(bits, ty_bits(uimm12scaled.scale_ty()));
}
sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd));
}
&MemArg::RegReg(r1, r2) => {
&AMode::RegReg(r1, r2) => {
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
));
}
&MemArg::RegScaled(r1, r2, ty) | &MemArg::RegScaledExtended(r1, r2, ty, _) => {
&AMode::RegScaled(r1, r2, ty) | &AMode::RegScaledExtended(r1, r2, ty, _) => {
assert_eq!(bits, ty_bits(ty));
let extendop = match &mem {
&MemArg::RegScaled(..) => None,
&MemArg::RegScaledExtended(_, _, _, op) => Some(op),
&AMode::RegScaled(..) => None,
&AMode::RegScaledExtended(_, _, _, op) => Some(op),
_ => unreachable!(),
};
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ true, extendop, rd,
));
}
&MemArg::RegExtended(r1, r2, extendop) => {
&AMode::RegExtended(r1, r2, extendop) => {
sink.put4(enc_ldst_reg(
op,
r1,
@@ -815,7 +815,7 @@ impl MachInstEmit for Inst {
rd,
));
}
&MemArg::Label(ref label) => {
&AMode::Label(ref label) => {
let offset = match label {
// cast i32 to u32 (two's-complement)
&MemLabel::PCRel(off) => off as u32,
@@ -843,17 +843,17 @@ impl MachInstEmit for Inst {
_ => panic!("Unspported size for LDR from constant pool!"),
}
}
&MemArg::PreIndexed(reg, simm9) => {
&AMode::PreIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd));
}
&MemArg::PostIndexed(reg, simm9) => {
&AMode::PostIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd));
}
// Eliminated by `mem_finalize()` above.
&MemArg::SPOffset(..)
| &MemArg::FPOffset(..)
| &MemArg::NominalSPOffset(..) => panic!("Should not see stack-offset here!"),
&MemArg::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
&AMode::SPOffset(..) | &AMode::FPOffset(..) | &AMode::NominalSPOffset(..) => {
panic!("Should not see stack-offset here!")
}
&AMode::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
}
}
@@ -916,32 +916,31 @@ impl MachInstEmit for Inst {
}
match &mem {
&MemArg::Unscaled(reg, simm9) => {
&AMode::Unscaled(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
}
&MemArg::UnsignedOffset(reg, uimm12scaled) => {
&AMode::UnsignedOffset(reg, uimm12scaled) => {
if uimm12scaled.value() != 0 {
assert_eq!(bits, ty_bits(uimm12scaled.scale_ty()));
}
sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd));
}
&MemArg::RegReg(r1, r2) => {
&AMode::RegReg(r1, r2) => {
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
));
}
&MemArg::RegScaled(r1, r2, _ty)
| &MemArg::RegScaledExtended(r1, r2, _ty, _) => {
&AMode::RegScaled(r1, r2, _ty) | &AMode::RegScaledExtended(r1, r2, _ty, _) => {
let extendop = match &mem {
&MemArg::RegScaled(..) => None,
&MemArg::RegScaledExtended(_, _, _, op) => Some(op),
&AMode::RegScaled(..) => None,
&AMode::RegScaledExtended(_, _, _, op) => Some(op),
_ => unreachable!(),
};
sink.put4(enc_ldst_reg(
op, r1, r2, /* scaled = */ true, extendop, rd,
));
}
&MemArg::RegExtended(r1, r2, extendop) => {
&AMode::RegExtended(r1, r2, extendop) => {
sink.put4(enc_ldst_reg(
op,
r1,
@@ -951,33 +950,33 @@ impl MachInstEmit for Inst {
rd,
));
}
&MemArg::Label(..) => {
&AMode::Label(..) => {
panic!("Store to a MemLabel not implemented!");
}
&MemArg::PreIndexed(reg, simm9) => {
&AMode::PreIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd));
}
&MemArg::PostIndexed(reg, simm9) => {
&AMode::PostIndexed(reg, simm9) => {
sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd));
}
// Eliminated by `mem_finalize()` above.
&MemArg::SPOffset(..)
| &MemArg::FPOffset(..)
| &MemArg::NominalSPOffset(..) => panic!("Should not see stack-offset here!"),
&MemArg::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
&AMode::SPOffset(..) | &AMode::FPOffset(..) | &AMode::NominalSPOffset(..) => {
panic!("Should not see stack-offset here!")
}
&AMode::RegOffset(..) => panic!("SHould not see generic reg-offset here!"),
}
}
&Inst::StoreP64 { rt, rt2, ref mem } => match mem {
&PairMemArg::SignedOffset(reg, simm7) => {
&PairAMode::SignedOffset(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100100, simm7, reg, rt, rt2));
}
&PairMemArg::PreIndexed(reg, simm7) => {
&PairAMode::PreIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100110, simm7, reg.to_reg(), rt, rt2));
}
&PairMemArg::PostIndexed(reg, simm7) => {
&PairAMode::PostIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100010, simm7, reg.to_reg(), rt, rt2));
}
@@ -986,15 +985,15 @@ impl MachInstEmit for Inst {
let rt = rt.to_reg();
let rt2 = rt2.to_reg();
match mem {
&PairMemArg::SignedOffset(reg, simm7) => {
&PairAMode::SignedOffset(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100101, simm7, reg, rt, rt2));
}
&PairMemArg::PreIndexed(reg, simm7) => {
&PairAMode::PreIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100111, simm7, reg.to_reg(), rt, rt2));
}
&PairMemArg::PostIndexed(reg, simm7) => {
&PairAMode::PostIndexed(reg, simm7) => {
assert_eq!(simm7.scale_ty, I64);
sink.put4(enc_ldst_pair(0b1010100011, simm7, reg.to_reg(), rt, rt2));
}
@@ -1475,7 +1474,7 @@ impl MachInstEmit for Inst {
&Inst::LoadFpuConst32 { rd, const_data } => {
let inst = Inst::FpuLoad32 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
@@ -1488,7 +1487,7 @@ impl MachInstEmit for Inst {
&Inst::LoadFpuConst64 { rd, const_data } => {
let inst = Inst::FpuLoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
@@ -1501,7 +1500,7 @@ impl MachInstEmit for Inst {
&Inst::LoadFpuConst128 { rd, const_data } => {
let inst = Inst::FpuLoad128 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
};
inst.emit(sink, flags, state);
@@ -1970,7 +1969,7 @@ impl MachInstEmit for Inst {
// Load value out of jump table
let inst = Inst::SLoad32 {
rd: rtmp2,
mem: MemArg::reg_plus_reg_scaled_extended(
mem: AMode::reg_plus_reg_scaled_extended(
rtmp1.to_reg(),
rtmp2.to_reg(),
I32,
@@ -2018,7 +2017,7 @@ impl MachInstEmit for Inst {
&Inst::LoadConst64 { rd, const_data } => {
let inst = Inst::ULoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
inst.emit(sink, flags, state);
@@ -2036,7 +2035,7 @@ impl MachInstEmit for Inst {
} => {
let inst = Inst::ULoad64 {
rd,
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None, // can't cause a user trap.
};
inst.emit(sink, flags, state);
@@ -2058,8 +2057,8 @@ impl MachInstEmit for Inst {
}
let (reg, offset) = match mem {
MemArg::Unscaled(r, simm9) => (r, simm9.value()),
MemArg::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
AMode::Unscaled(r, simm9) => (r, simm9.value()),
AMode::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
_ => panic!("Unsupported case for LoadAddr: {:?}", mem),
};
let abs_offset = if offset < 0 {
@@ -2085,7 +2084,7 @@ impl MachInstEmit for Inst {
};
add.emit(sink, flags, state);
} else {
// Use `tmp2` here: `reg` may be `spilltmp` if the `MemArg` on this instruction
// Use `tmp2` here: `reg` may be `spilltmp` if the `AMode` on this instruction
// was initially an `SPOffset`. Assert that `tmp2` is truly free to use. Note
// that no other instructions will be inserted here (we're emitting directly),
// and a live range of `tmp2` should not span this instruction, so this use

View File

@@ -1079,7 +1079,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad8 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"41004038",
@@ -1088,7 +1088,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad8 {
rd: writable_xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::zero(I8)),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::zero(I8)),
srcloc: None,
},
"41004039",
@@ -1097,7 +1097,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad8 {
rd: writable_xreg(1),
mem: MemArg::RegReg(xreg(2), xreg(5)),
mem: AMode::RegReg(xreg(2), xreg(5)),
srcloc: None,
},
"41686538",
@@ -1106,7 +1106,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad8 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"41008038",
@@ -1115,7 +1115,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad8 {
rd: writable_xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(63, I8).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(63, I8).unwrap()),
srcloc: None,
},
"41FC8039",
@@ -1124,7 +1124,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad8 {
rd: writable_xreg(1),
mem: MemArg::RegReg(xreg(2), xreg(5)),
mem: AMode::RegReg(xreg(2), xreg(5)),
srcloc: None,
},
"4168A538",
@@ -1133,7 +1133,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad16 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::maybe_from_i64(5).unwrap()),
mem: AMode::Unscaled(xreg(2), SImm9::maybe_from_i64(5).unwrap()),
srcloc: None,
},
"41504078",
@@ -1142,7 +1142,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad16 {
rd: writable_xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(8, I16).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(8, I16).unwrap()),
srcloc: None,
},
"41104079",
@@ -1151,7 +1151,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad16 {
rd: writable_xreg(1),
mem: MemArg::RegScaled(xreg(2), xreg(3), I16),
mem: AMode::RegScaled(xreg(2), xreg(3), I16),
srcloc: None,
},
"41786378",
@@ -1160,7 +1160,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad16 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"41008078",
@@ -1169,7 +1169,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad16 {
rd: writable_xreg(28),
mem: MemArg::UnsignedOffset(xreg(20), UImm12Scaled::maybe_from_i64(24, I16).unwrap()),
mem: AMode::UnsignedOffset(xreg(20), UImm12Scaled::maybe_from_i64(24, I16).unwrap()),
srcloc: None,
},
"9C328079",
@@ -1178,7 +1178,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad16 {
rd: writable_xreg(28),
mem: MemArg::RegScaled(xreg(20), xreg(20), I16),
mem: AMode::RegScaled(xreg(20), xreg(20), I16),
srcloc: None,
},
"9C7AB478",
@@ -1187,7 +1187,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad32 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"410040B8",
@@ -1196,7 +1196,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad32 {
rd: writable_xreg(12),
mem: MemArg::UnsignedOffset(xreg(0), UImm12Scaled::maybe_from_i64(204, I32).unwrap()),
mem: AMode::UnsignedOffset(xreg(0), UImm12Scaled::maybe_from_i64(204, I32).unwrap()),
srcloc: None,
},
"0CCC40B9",
@@ -1205,7 +1205,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad32 {
rd: writable_xreg(1),
mem: MemArg::RegScaled(xreg(2), xreg(12), I32),
mem: AMode::RegScaled(xreg(2), xreg(12), I32),
srcloc: None,
},
"41786CB8",
@@ -1214,7 +1214,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad32 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"410080B8",
@@ -1223,7 +1223,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad32 {
rd: writable_xreg(12),
mem: MemArg::UnsignedOffset(xreg(1), UImm12Scaled::maybe_from_i64(16380, I32).unwrap()),
mem: AMode::UnsignedOffset(xreg(1), UImm12Scaled::maybe_from_i64(16380, I32).unwrap()),
srcloc: None,
},
"2CFCBFB9",
@@ -1232,7 +1232,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::SLoad32 {
rd: writable_xreg(1),
mem: MemArg::RegScaled(xreg(5), xreg(1), I32),
mem: AMode::RegScaled(xreg(5), xreg(1), I32),
srcloc: None,
},
"A178A1B8",
@@ -1241,7 +1241,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"410040F8",
@@ -1250,7 +1250,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::maybe_from_i64(-256).unwrap()),
mem: AMode::Unscaled(xreg(2), SImm9::maybe_from_i64(-256).unwrap()),
srcloc: None,
},
"410050F8",
@@ -1259,7 +1259,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::maybe_from_i64(255).unwrap()),
mem: AMode::Unscaled(xreg(2), SImm9::maybe_from_i64(255).unwrap()),
srcloc: None,
},
"41F04FF8",
@@ -1268,7 +1268,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(32760, I64).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(32760, I64).unwrap()),
srcloc: None,
},
"41FC7FF9",
@@ -1277,7 +1277,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegReg(xreg(2), xreg(3)),
mem: AMode::RegReg(xreg(2), xreg(3)),
srcloc: None,
},
"416863F8",
@@ -1286,7 +1286,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegScaled(xreg(2), xreg(3), I64),
mem: AMode::RegScaled(xreg(2), xreg(3), I64),
srcloc: None,
},
"417863F8",
@@ -1295,7 +1295,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegScaledExtended(xreg(2), xreg(3), I64, ExtendOp::SXTW),
mem: AMode::RegScaledExtended(xreg(2), xreg(3), I64, ExtendOp::SXTW),
srcloc: None,
},
"41D863F8",
@@ -1304,7 +1304,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegExtended(xreg(2), xreg(3), ExtendOp::SXTW),
mem: AMode::RegExtended(xreg(2), xreg(3), ExtendOp::SXTW),
srcloc: None,
},
"41C863F8",
@@ -1313,7 +1313,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::Label(MemLabel::PCRel(64)),
mem: AMode::Label(MemLabel::PCRel(64)),
srcloc: None,
},
"01020058",
@@ -1322,7 +1322,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::PreIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
mem: AMode::PreIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
srcloc: None,
},
"410C41F8",
@@ -1331,7 +1331,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::PostIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
mem: AMode::PostIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
srcloc: None,
},
"410441F8",
@@ -1340,7 +1340,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::FPOffset(32768, I8),
mem: AMode::FPOffset(32768, I8),
srcloc: None,
},
"100090D2B063308B010240F9",
@@ -1349,7 +1349,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::FPOffset(-32768, I8),
mem: AMode::FPOffset(-32768, I8),
srcloc: None,
},
"F0FF8F92B063308B010240F9",
@@ -1358,7 +1358,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::FPOffset(1048576, I8), // 2^20
mem: AMode::FPOffset(1048576, I8), // 2^20
srcloc: None,
},
"1002A0D2B063308B010240F9",
@@ -1367,7 +1367,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::FPOffset(1048576 + 1, I8), // 2^20 + 1
mem: AMode::FPOffset(1048576 + 1, I8), // 2^20 + 1
srcloc: None,
},
"300080D21002A0F2B063308B010240F9",
@@ -1377,7 +1377,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegOffset(xreg(7), 8, I64),
mem: AMode::RegOffset(xreg(7), 8, I64),
srcloc: None,
},
"E18040F8",
@@ -1387,7 +1387,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegOffset(xreg(7), 1024, I64),
mem: AMode::RegOffset(xreg(7), 1024, I64),
srcloc: None,
},
"E10042F9",
@@ -1397,7 +1397,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::ULoad64 {
rd: writable_xreg(1),
mem: MemArg::RegOffset(xreg(7), 1048576, I64),
mem: AMode::RegOffset(xreg(7), 1048576, I64),
srcloc: None,
},
"1002A0D2F060308B010240F9",
@@ -1407,7 +1407,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store8 {
rd: xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"41000038",
@@ -1416,7 +1416,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store8 {
rd: xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(4095, I8).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(4095, I8).unwrap()),
srcloc: None,
},
"41FC3F39",
@@ -1425,7 +1425,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store16 {
rd: xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"41000078",
@@ -1434,7 +1434,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store16 {
rd: xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(8190, I16).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(8190, I16).unwrap()),
srcloc: None,
},
"41FC3F79",
@@ -1443,7 +1443,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store32 {
rd: xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"410000B8",
@@ -1452,7 +1452,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store32 {
rd: xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(16380, I32).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(16380, I32).unwrap()),
srcloc: None,
},
"41FC3FB9",
@@ -1461,7 +1461,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::Unscaled(xreg(2), SImm9::zero()),
mem: AMode::Unscaled(xreg(2), SImm9::zero()),
srcloc: None,
},
"410000F8",
@@ -1470,7 +1470,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(32760, I64).unwrap()),
mem: AMode::UnsignedOffset(xreg(2), UImm12Scaled::maybe_from_i64(32760, I64).unwrap()),
srcloc: None,
},
"41FC3FF9",
@@ -1479,7 +1479,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::RegReg(xreg(2), xreg(3)),
mem: AMode::RegReg(xreg(2), xreg(3)),
srcloc: None,
},
"416823F8",
@@ -1488,7 +1488,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::RegScaled(xreg(2), xreg(3), I64),
mem: AMode::RegScaled(xreg(2), xreg(3), I64),
srcloc: None,
},
"417823F8",
@@ -1497,7 +1497,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::RegScaledExtended(xreg(2), xreg(3), I64, ExtendOp::UXTW),
mem: AMode::RegScaledExtended(xreg(2), xreg(3), I64, ExtendOp::UXTW),
srcloc: None,
},
"415823F8",
@@ -1506,7 +1506,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::RegExtended(xreg(2), xreg(3), ExtendOp::UXTW),
mem: AMode::RegExtended(xreg(2), xreg(3), ExtendOp::UXTW),
srcloc: None,
},
"414823F8",
@@ -1515,7 +1515,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::PreIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
mem: AMode::PreIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
srcloc: None,
},
"410C01F8",
@@ -1524,7 +1524,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::Store64 {
rd: xreg(1),
mem: MemArg::PostIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
mem: AMode::PostIndexed(writable_xreg(2), SImm9::maybe_from_i64(16).unwrap()),
srcloc: None,
},
"410401F8",
@@ -1535,7 +1535,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(8),
rt2: xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::zero(I64)),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::zero(I64)),
},
"482500A9",
"stp x8, x9, [x10]",
@@ -1544,7 +1544,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(8),
rt2: xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(504, I64).unwrap()),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(504, I64).unwrap()),
},
"48A51FA9",
"stp x8, x9, [x10, #504]",
@@ -1553,7 +1553,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(8),
rt2: xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(-64, I64).unwrap()),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(-64, I64).unwrap()),
},
"48253CA9",
"stp x8, x9, [x10, #-64]",
@@ -1562,7 +1562,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(21),
rt2: xreg(28),
mem: PairMemArg::SignedOffset(xreg(1), SImm7Scaled::maybe_from_i64(-512, I64).unwrap()),
mem: PairAMode::SignedOffset(xreg(1), SImm7Scaled::maybe_from_i64(-512, I64).unwrap()),
},
"357020A9",
"stp x21, x28, [x1, #-512]",
@@ -1571,7 +1571,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(8),
rt2: xreg(9),
mem: PairMemArg::PreIndexed(
mem: PairAMode::PreIndexed(
writable_xreg(10),
SImm7Scaled::maybe_from_i64(-64, I64).unwrap(),
),
@@ -1583,7 +1583,7 @@ fn test_aarch64_binemit() {
Inst::StoreP64 {
rt: xreg(15),
rt2: xreg(16),
mem: PairMemArg::PostIndexed(
mem: PairAMode::PostIndexed(
writable_xreg(20),
SImm7Scaled::maybe_from_i64(504, I64).unwrap(),
),
@@ -1596,7 +1596,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::zero(I64)),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::zero(I64)),
},
"482540A9",
"ldp x8, x9, [x10]",
@@ -1605,7 +1605,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(504, I64).unwrap()),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(504, I64).unwrap()),
},
"48A55FA9",
"ldp x8, x9, [x10, #504]",
@@ -1614,7 +1614,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(9),
mem: PairMemArg::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(-64, I64).unwrap()),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(-64, I64).unwrap()),
},
"48257CA9",
"ldp x8, x9, [x10, #-64]",
@@ -1623,10 +1623,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(9),
mem: PairMemArg::SignedOffset(
xreg(10),
SImm7Scaled::maybe_from_i64(-512, I64).unwrap(),
),
mem: PairAMode::SignedOffset(xreg(10), SImm7Scaled::maybe_from_i64(-512, I64).unwrap()),
},
"482560A9",
"ldp x8, x9, [x10, #-512]",
@@ -1635,7 +1632,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(9),
mem: PairMemArg::PreIndexed(
mem: PairAMode::PreIndexed(
writable_xreg(10),
SImm7Scaled::maybe_from_i64(-64, I64).unwrap(),
),
@@ -1647,7 +1644,7 @@ fn test_aarch64_binemit() {
Inst::LoadP64 {
rt: writable_xreg(8),
rt2: writable_xreg(25),
mem: PairMemArg::PostIndexed(
mem: PairAMode::PostIndexed(
writable_xreg(12),
SImm7Scaled::maybe_from_i64(504, I64).unwrap(),
),
@@ -4143,7 +4140,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad32 {
rd: writable_vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), F32),
mem: AMode::RegScaled(xreg(8), xreg(9), F32),
srcloc: None,
},
"107969BC",
@@ -4153,7 +4150,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad64 {
rd: writable_vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), F64),
mem: AMode::RegScaled(xreg(8), xreg(9), F64),
srcloc: None,
},
"107969FC",
@@ -4163,7 +4160,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad128 {
rd: writable_vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), I128),
mem: AMode::RegScaled(xreg(8), xreg(9), I128),
srcloc: None,
},
"1079E93C",
@@ -4173,7 +4170,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad32 {
rd: writable_vreg(16),
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
},
"5000001C",
@@ -4183,7 +4180,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad64 {
rd: writable_vreg(16),
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
},
"5000005C",
@@ -4193,7 +4190,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuLoad128 {
rd: writable_vreg(16),
mem: MemArg::Label(MemLabel::PCRel(8)),
mem: AMode::Label(MemLabel::PCRel(8)),
srcloc: None,
},
"5000009C",
@@ -4203,7 +4200,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuStore32 {
rd: vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), F32),
mem: AMode::RegScaled(xreg(8), xreg(9), F32),
srcloc: None,
},
"107929BC",
@@ -4213,7 +4210,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuStore64 {
rd: vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), F64),
mem: AMode::RegScaled(xreg(8), xreg(9), F64),
srcloc: None,
},
"107929FC",
@@ -4223,7 +4220,7 @@ fn test_aarch64_binemit() {
insns.push((
Inst::FpuStore128 {
rd: vreg(16),
mem: MemArg::RegScaled(xreg(8), xreg(9), I128),
mem: AMode::RegScaled(xreg(8), xreg(9), I128),
srcloc: None,
},
"1079A93C",

View File

@@ -463,68 +463,68 @@ pub enum Inst {
/// An unsigned (zero-extending) 8-bit load.
ULoad8 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A signed (sign-extending) 8-bit load.
SLoad8 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// An unsigned (zero-extending) 16-bit load.
ULoad16 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A signed (sign-extending) 16-bit load.
SLoad16 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// An unsigned (zero-extending) 32-bit load.
ULoad32 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A signed (sign-extending) 32-bit load.
SLoad32 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A 64-bit load.
ULoad64 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// An 8-bit store.
Store8 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A 16-bit store.
Store16 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A 32-bit store.
Store32 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// A 64-bit store.
Store64 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
@@ -532,13 +532,13 @@ pub enum Inst {
StoreP64 {
rt: Reg,
rt2: Reg,
mem: PairMemArg,
mem: PairAMode,
},
/// A load of a pair of registers.
LoadP64 {
rt: Writable<Reg>,
rt2: Writable<Reg>,
mem: PairMemArg,
mem: PairAMode,
},
/// A MOV instruction. These are encoded as ORR's (AluRRR form) but we
@@ -734,37 +734,37 @@ pub enum Inst {
/// Floating-point load, single-precision (32 bit).
FpuLoad32 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// Floating-point store, single-precision (32 bit).
FpuStore32 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// Floating-point load, double-precision (64 bit).
FpuLoad64 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// Floating-point store, double-precision (64 bit).
FpuStore64 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// Floating-point/vector load, 128 bit.
FpuLoad128 {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
/// Floating-point/vector store, 128 bit.
FpuStore128 {
rd: Reg,
mem: MemArg,
mem: AMode,
srcloc: Option<SourceLoc>,
},
@@ -1050,11 +1050,11 @@ pub enum Inst {
/// Load address referenced by `mem` into `rd`.
LoadAddr {
rd: Writable<Reg>,
mem: MemArg,
mem: AMode,
},
/// Marker, no-op in generated code: SP "virtual offset" is adjusted. This
/// controls how MemArg::NominalSPOffset args are lowered.
/// controls how AMode::NominalSPOffset args are lowered.
VirtualSPOffsetAdj {
offset: i64,
},
@@ -1215,45 +1215,119 @@ impl Inst {
const_data: value,
}
}
/// Generic constructor for a load (zero-extending where appropriate).
pub fn gen_load(into_reg: Writable<Reg>, mem: AMode, ty: Type) -> Inst {
    // Every constructor below takes the same destination register, the same
    // addressing mode, and no source location; bind them once and use field
    // init shorthand in each arm.
    let (rd, srcloc) = (into_reg, None);
    match ty {
        // 8-bit classes: zero-extending byte load.
        B1 | B8 | I8 => Inst::ULoad8 { rd, mem, srcloc },
        // 16-bit classes: zero-extending halfword load.
        B16 | I16 => Inst::ULoad16 { rd, mem, srcloc },
        // 32-bit integer-like classes (including 32-bit reference types).
        B32 | I32 | R32 => Inst::ULoad32 { rd, mem, srcloc },
        // 64-bit integer-like classes (including 64-bit reference types).
        B64 | I64 | R64 => Inst::ULoad64 { rd, mem, srcloc },
        // Scalar floats use the FPU load forms.
        F32 => Inst::FpuLoad32 { rd, mem, srcloc },
        F64 => Inst::FpuLoad64 { rd, mem, srcloc },
        // Vector and other types are not supported by this generic helper.
        _ => unimplemented!("gen_load({})", ty),
    }
}
/// Generic constructor for a store.
pub fn gen_store(mem: AMode, from_reg: Reg, ty: Type) -> Inst {
    // Every constructor below takes the same source register, the same
    // addressing mode, and no source location; bind them once and use field
    // init shorthand in each arm.
    let (rd, srcloc) = (from_reg, None);
    match ty {
        // 8-bit classes: byte store.
        B1 | B8 | I8 => Inst::Store8 { rd, mem, srcloc },
        // 16-bit classes: halfword store.
        B16 | I16 => Inst::Store16 { rd, mem, srcloc },
        // 32-bit integer-like classes (including 32-bit reference types).
        B32 | I32 | R32 => Inst::Store32 { rd, mem, srcloc },
        // 64-bit integer-like classes (including 64-bit reference types).
        B64 | I64 | R64 => Inst::Store64 { rd, mem, srcloc },
        // Scalar floats use the FPU store forms.
        F32 => Inst::FpuStore32 { rd, mem, srcloc },
        F64 => Inst::FpuStore64 { rd, mem, srcloc },
        // Vector and other types are not supported by this generic helper.
        _ => unimplemented!("gen_store({})", ty),
    }
}
}
//=============================================================================
// Instructions: get_regs
fn memarg_regs(memarg: &MemArg, collector: &mut RegUsageCollector) {
fn memarg_regs(memarg: &AMode, collector: &mut RegUsageCollector) {
match memarg {
&MemArg::Unscaled(reg, ..) | &MemArg::UnsignedOffset(reg, ..) => {
&AMode::Unscaled(reg, ..) | &AMode::UnsignedOffset(reg, ..) => {
collector.add_use(reg);
}
&MemArg::RegReg(r1, r2, ..)
| &MemArg::RegScaled(r1, r2, ..)
| &MemArg::RegScaledExtended(r1, r2, ..)
| &MemArg::RegExtended(r1, r2, ..) => {
&AMode::RegReg(r1, r2, ..)
| &AMode::RegScaled(r1, r2, ..)
| &AMode::RegScaledExtended(r1, r2, ..)
| &AMode::RegExtended(r1, r2, ..) => {
collector.add_use(r1);
collector.add_use(r2);
}
&MemArg::Label(..) => {}
&MemArg::PreIndexed(reg, ..) | &MemArg::PostIndexed(reg, ..) => {
&AMode::Label(..) => {}
&AMode::PreIndexed(reg, ..) | &AMode::PostIndexed(reg, ..) => {
collector.add_mod(reg);
}
&MemArg::FPOffset(..) => {
&AMode::FPOffset(..) => {
collector.add_use(fp_reg());
}
&MemArg::SPOffset(..) | &MemArg::NominalSPOffset(..) => {
&AMode::SPOffset(..) | &AMode::NominalSPOffset(..) => {
collector.add_use(stack_reg());
}
&MemArg::RegOffset(r, ..) => {
&AMode::RegOffset(r, ..) => {
collector.add_use(r);
}
}
}
fn pairmemarg_regs(pairmemarg: &PairMemArg, collector: &mut RegUsageCollector) {
fn pairmemarg_regs(pairmemarg: &PairAMode, collector: &mut RegUsageCollector) {
match pairmemarg {
&PairMemArg::SignedOffset(reg, ..) => {
&PairAMode::SignedOffset(reg, ..) => {
collector.add_use(reg);
}
&PairMemArg::PreIndexed(reg, ..) | &PairMemArg::PostIndexed(reg, ..) => {
&PairAMode::PreIndexed(reg, ..) | &PairAMode::PostIndexed(reg, ..) => {
collector.add_mod(reg);
}
}
@@ -1627,36 +1701,36 @@ fn aarch64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
}
}
fn map_mem<RUM: RegUsageMapper>(m: &RUM, mem: &mut MemArg) {
fn map_mem<RUM: RegUsageMapper>(m: &RUM, mem: &mut AMode) {
// N.B.: we take only the pre-map here, but this is OK because the
// only addressing modes that update registers (pre/post-increment on
// AArch64) both read and write registers, so they are "mods" rather
// than "defs", so must be the same in both the pre- and post-map.
match mem {
&mut MemArg::Unscaled(ref mut reg, ..) => map_use(m, reg),
&mut MemArg::UnsignedOffset(ref mut reg, ..) => map_use(m, reg),
&mut MemArg::RegReg(ref mut r1, ref mut r2)
| &mut MemArg::RegScaled(ref mut r1, ref mut r2, ..)
| &mut MemArg::RegScaledExtended(ref mut r1, ref mut r2, ..)
| &mut MemArg::RegExtended(ref mut r1, ref mut r2, ..) => {
&mut AMode::Unscaled(ref mut reg, ..) => map_use(m, reg),
&mut AMode::UnsignedOffset(ref mut reg, ..) => map_use(m, reg),
&mut AMode::RegReg(ref mut r1, ref mut r2)
| &mut AMode::RegScaled(ref mut r1, ref mut r2, ..)
| &mut AMode::RegScaledExtended(ref mut r1, ref mut r2, ..)
| &mut AMode::RegExtended(ref mut r1, ref mut r2, ..) => {
map_use(m, r1);
map_use(m, r2);
}
&mut MemArg::Label(..) => {}
&mut MemArg::PreIndexed(ref mut r, ..) => map_mod(m, r),
&mut MemArg::PostIndexed(ref mut r, ..) => map_mod(m, r),
&mut MemArg::FPOffset(..)
| &mut MemArg::SPOffset(..)
| &mut MemArg::NominalSPOffset(..) => {}
&mut MemArg::RegOffset(ref mut r, ..) => map_use(m, r),
&mut AMode::Label(..) => {}
&mut AMode::PreIndexed(ref mut r, ..) => map_mod(m, r),
&mut AMode::PostIndexed(ref mut r, ..) => map_mod(m, r),
&mut AMode::FPOffset(..)
| &mut AMode::SPOffset(..)
| &mut AMode::NominalSPOffset(..) => {}
&mut AMode::RegOffset(ref mut r, ..) => map_use(m, r),
};
}
fn map_pairmem<RUM: RegUsageMapper>(m: &RUM, mem: &mut PairMemArg) {
fn map_pairmem<RUM: RegUsageMapper>(m: &RUM, mem: &mut PairAMode) {
match mem {
&mut PairMemArg::SignedOffset(ref mut reg, ..) => map_use(m, reg),
&mut PairMemArg::PreIndexed(ref mut reg, ..) => map_def(m, reg),
&mut PairMemArg::PostIndexed(ref mut reg, ..) => map_def(m, reg),
&mut PairAMode::SignedOffset(ref mut reg, ..) => map_use(m, reg),
&mut PairAMode::PreIndexed(ref mut reg, ..) => map_def(m, reg),
&mut PairAMode::PostIndexed(ref mut reg, ..) => map_def(m, reg),
}
}
@@ -2432,10 +2506,10 @@ impl MachInst for Inst {
// Pretty-printing of instructions.
fn mem_finalize_for_show(
mem: &MemArg,
mem: &AMode,
mb_rru: Option<&RealRegUniverse>,
state: &EmitState,
) -> (String, MemArg) {
) -> (String, AMode) {
let (mem_insts, mem) = mem_finalize(0, mem, state);
let mut mem_str = mem_insts
.into_iter()
@@ -2646,7 +2720,7 @@ impl Inst {
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let is_unscaled = match &mem {
&MemArg::Unscaled(..) => true,
&AMode::Unscaled(..) => true,
_ => false,
};
let (op, size) = match (self, is_unscaled) {
@@ -2694,7 +2768,7 @@ impl Inst {
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let is_unscaled = match &mem {
&MemArg::Unscaled(..) => true,
&AMode::Unscaled(..) => true,
_ => false,
};
let (op, size) = match (self, is_unscaled) {
@@ -3350,8 +3424,8 @@ impl Inst {
ret.push_str(&inst.show_rru(mb_rru));
}
let (reg, offset) = match mem {
MemArg::Unscaled(r, simm9) => (r, simm9.value()),
MemArg::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
AMode::Unscaled(r, simm9) => (r, simm9.value()),
AMode::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
_ => panic!("Unsupported case for LoadAddr: {:?}", mem),
};
let abs_offset = if offset < 0 {

View File

@@ -663,7 +663,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
elem_ty: Type,
roots: &[InsnInput],
offset: i32,
) -> MemArg {
) -> AMode {
// TODO: support base_reg + scale * index_reg. For this, we would need to pattern-match shl or
// mul instructions (Load/StoreComplex don't include scale factors).
@@ -680,26 +680,26 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
offset
);
// First, decide what the `MemArg` will be. Take one extendee and one 64-bit
// First, decide what the `AMode` will be. Take one extendee and one 64-bit
// reg, or two 64-bit regs, or a 64-bit reg and a 32-bit reg with extension,
// or some other combination as appropriate.
let memarg = if addends64.len() > 0 {
if addends32.len() > 0 {
let (reg32, extendop) = addends32.pop().unwrap();
let reg64 = addends64.pop().unwrap();
MemArg::RegExtended(reg64, reg32, extendop)
AMode::RegExtended(reg64, reg32, extendop)
} else if offset > 0 && offset < 0x1000 {
let reg64 = addends64.pop().unwrap();
let off = offset;
offset = 0;
MemArg::RegOffset(reg64, off, elem_ty)
AMode::RegOffset(reg64, off, elem_ty)
} else if addends64.len() >= 2 {
let reg1 = addends64.pop().unwrap();
let reg2 = addends64.pop().unwrap();
MemArg::RegReg(reg1, reg2)
AMode::RegReg(reg1, reg2)
} else {
let reg1 = addends64.pop().unwrap();
MemArg::reg(reg1)
AMode::reg(reg1)
}
} else
/* addends64.len() == 0 */
@@ -720,9 +720,9 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
to_bits: 64,
});
if let Some((reg2, extendop)) = addends32.pop() {
MemArg::RegExtended(tmp.to_reg(), reg2, extendop)
AMode::RegExtended(tmp.to_reg(), reg2, extendop)
} else {
MemArg::reg(tmp.to_reg())
AMode::reg(tmp.to_reg())
}
} else
/* addends32.len() == 0 */
@@ -730,32 +730,32 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
let off_reg = ctx.alloc_tmp(RegClass::I64, I64);
lower_constant_u64(ctx, off_reg, offset as u64);
offset = 0;
MemArg::reg(off_reg.to_reg())
AMode::reg(off_reg.to_reg())
}
};
// At this point, if we have any remaining components, we need to allocate a
// temp, replace one of the registers in the MemArg with the temp, and emit
// temp, replace one of the registers in the AMode with the temp, and emit
// instructions to add together the remaining components. Return immediately
// if this is *not* the case.
if offset == 0 && addends32.len() == 0 && addends64.len() == 0 {
return memarg;
}
// Allocate the temp and shoehorn it into the MemArg.
// Allocate the temp and shoehorn it into the AMode.
let addr = ctx.alloc_tmp(RegClass::I64, I64);
let (reg, memarg) = match memarg {
MemArg::RegExtended(r1, r2, extendop) => {
(r1, MemArg::RegExtended(addr.to_reg(), r2, extendop))
AMode::RegExtended(r1, r2, extendop) => {
(r1, AMode::RegExtended(addr.to_reg(), r2, extendop))
}
MemArg::RegOffset(r, off, ty) => (r, MemArg::RegOffset(addr.to_reg(), off, ty)),
MemArg::RegReg(r1, r2) => (r2, MemArg::RegReg(addr.to_reg(), r1)),
MemArg::UnsignedOffset(r, imm) => (r, MemArg::UnsignedOffset(addr.to_reg(), imm)),
AMode::RegOffset(r, off, ty) => (r, AMode::RegOffset(addr.to_reg(), off, ty)),
AMode::RegReg(r1, r2) => (r2, AMode::RegReg(addr.to_reg(), r1)),
AMode::UnsignedOffset(r, imm) => (r, AMode::UnsignedOffset(addr.to_reg(), imm)),
_ => unreachable!(),
};
// If there is any offset, load that first into `addr`, and add the `reg`
// that we kicked out of the `MemArg`; otherwise, start with that reg.
// that we kicked out of the `AMode`; otherwise, start with that reg.
if offset != 0 {
// If we can fit offset or -offset in an imm12, use an add-imm
// to combine the reg and offset. Otherwise, load value first then add.
@@ -994,37 +994,6 @@ pub(crate) fn condcode_is_signed(cc: IntCC) -> bool {
//=============================================================================
// Helpers for instruction lowering.
/// Returns the size (in bits) of a given type.
pub fn ty_bits(ty: Type) -> usize {
match ty {
B1 => 1,
B8 | I8 => 8,
B16 | I16 => 16,
B32 | I32 | F32 | R32 => 32,
B64 | I64 | F64 | R64 => 64,
B128 | I128 => 128,
IFLAGS | FFLAGS => 32,
B8X8 | I8X8 | B16X4 | I16X4 | B32X2 | I32X2 => 64,
B8X16 | I8X16 | B16X8 | I16X8 | B32X4 | I32X4 | B64X2 | I64X2 => 128,
F32X4 | F64X2 => 128,
_ => panic!("ty_bits() on unknown type: {:?}", ty),
}
}
pub(crate) fn ty_is_int(ty: Type) -> bool {
match ty {
B1 | B8 | I8 | B16 | I16 | B32 | I32 | B64 | I64 | R32 | R64 => true,
F32 | F64 | B128 | F32X2 | F32X4 | F64X2 | I128 | I8X8 | I8X16 | I16X4 | I16X8 | I32X2
| I32X4 | I64X2 => false,
IFLAGS | FFLAGS => panic!("Unexpected flags type"),
_ => panic!("ty_is_int() on unknown type: {:?}", ty),
}
}
pub(crate) fn ty_is_float(ty: Type) -> bool {
!ty_is_int(ty)
}
pub(crate) fn choose_32_64<T: Copy>(ty: Type, op32: T, op64: T) -> T {
let bits = ty_bits(ty);
if bits <= 32 {

View File

@@ -1010,7 +1010,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
| Opcode::Sload32Complex => true,
_ => false,
};
let is_float = ty_is_float(elem_ty);
let is_float = ty_has_float_or_vec_representation(elem_ty);
let mem = lower_address(ctx, elem_ty, &inputs[..], off);
let rd = get_output_reg(ctx, outputs[0]);
@@ -1074,7 +1074,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Store | Opcode::StoreComplex => ctx.input_ty(insn, 0),
_ => unreachable!(),
};
let is_float = ty_is_float(elem_ty);
let is_float = ty_has_float_or_vec_representation(elem_ty);
let mem = lower_address(ctx, elem_ty, &inputs[1..], off);
let rd = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
@@ -1291,9 +1291,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
let ty = ctx.output_ty(insn, 0);
let bits = ty_bits(ty);
if ty_is_float(ty) && bits == 32 {
let is_float = ty_has_float_or_vec_representation(ty);
if is_float && bits == 32 {
ctx.emit(Inst::FpuCSel32 { cond, rd, rn, rm });
} else if ty_is_float(ty) && bits == 64 {
} else if is_float && bits == 64 {
ctx.emit(Inst::FpuCSel64 { cond, rd, rn, rm });
} else {
ctx.emit(Inst::CSel { cond, rd, rn, rm });
@@ -1315,9 +1316,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
let ty = ctx.output_ty(insn, 0);
let bits = ty_bits(ty);
if ty_is_float(ty) && bits == 32 {
let is_float = ty_has_float_or_vec_representation(ty);
if is_float && bits == 32 {
ctx.emit(Inst::FpuCSel32 { cond, rd, rn, rm });
} else if ty_is_float(ty) && bits == 64 {
} else if is_float && bits == 64 {
ctx.emit(Inst::FpuCSel64 { cond, rd, rn, rm });
} else {
ctx.emit(Inst::CSel { cond, rd, rn, rm });
@@ -1521,7 +1523,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rd = get_output_reg(ctx, outputs[0]);
let ity = ctx.input_ty(insn, 0);
let oty = ctx.output_ty(insn, 0);
match (ty_is_float(ity), ty_is_float(oty)) {
let ity_vec_reg = ty_has_float_or_vec_representation(ity);
let oty_vec_reg = ty_has_float_or_vec_representation(oty);
match (ity_vec_reg, oty_vec_reg) {
(true, true) => {
let narrow_mode = if ty_bits(ity) <= 32 && ty_bits(oty) <= 32 {
NarrowValueMode::ZeroExtend32
@@ -1809,7 +1813,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let size = VectorSize::from_ty(ctx.input_ty(insn, 0));
let ty = ty.unwrap();
if ty_is_int(ty) {
if ty_has_int_representation(ty) {
ctx.emit(Inst::MovFromVec { rd, rn, idx, size });
// Plain moves are faster on some processors.
} else if idx == 0 {
@@ -1837,7 +1841,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::gen_move(rd, rm, ty));
if ty_is_int(input_ty) {
if ty_has_int_representation(input_ty) {
ctx.emit(Inst::MovToVec { rd, rn, idx, size });
} else {
ctx.emit(Inst::VecMovElement {
@@ -1855,7 +1859,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rd = get_output_reg(ctx, outputs[0]);
let input_ty = ctx.input_ty(insn, 0);
let size = VectorSize::from_ty(ty.unwrap());
let inst = if ty_is_int(input_ty) {
let inst = if ty_has_int_representation(input_ty) {
Inst::VecDup { rd, rn, size }
} else {
Inst::VecDupFromFpu { rd, rn, size }

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,18 @@
//! Miscellaneous helpers for machine backends.
use crate::ir::Type;
/// Returns the size (in bits) of a given type.
pub fn ty_bits(ty: Type) -> usize {
    // Lossless widening cast from the narrower bit-count integer to usize.
    ty.bits() as usize
}
/// Is the type represented by an integer (not float) at the machine level?
pub(crate) fn ty_has_int_representation(ty: Type) -> bool {
    // Booleans and reference types live in integer registers alongside
    // plain integers.
    ty.is_bool() || ty.is_ref() || ty.is_int()
}
/// Is the type represented by a float or vector value at the machine level?
pub(crate) fn ty_has_float_or_vec_representation(ty: Type) -> bool {
    // Scalar floats and all vector types share the FP/vector register class.
    ty.is_float() || ty.is_vector()
}

View File

@@ -123,12 +123,16 @@ pub mod blockorder;
pub use blockorder::*;
pub mod abi;
pub use abi::*;
pub mod abi_impl;
pub use abi_impl::*;
pub mod pretty_print;
pub use pretty_print::*;
pub mod buffer;
pub use buffer::*;
pub mod adapter;
pub use adapter::*;
pub mod helpers;
pub use helpers::*;
/// A machine instruction.
pub trait MachInst: Clone + Debug {