ARM64 backend, part 4 / 11: ARM64 instruction definitions.

This patch provides the bottom layer of the ARM64 backend: it defines
the `Inst` type, which represents a single machine instruction, and
defines emission routines to produce machine code from a `VCode`
container of `Insts`. The backend cannot produce `Inst`s with just this
patch; that will come with later parts.

This patch contains code written by Julian Seward <jseward@acm.org> and
Benjamin Bouvier <public@benj.me>, originally developed on a side-branch
before rebasing and condensing into this patch series. See the `arm64`
branch at `https://github.com/cfallin/wasmtime` for original development
history.

This patch also contains code written by Joey Gouly
<joey.gouly@arm.com> and contributed to the above branch. These
contributions are "Copyright (c) 2020, Arm Limited."

Finally, a contribution from Joey Gouly contains the following notice:

    This is a port of VIXL's Assembler::IsImmLogical.

    Arm has the original copyright on the VIXL code this was ported from
    and is relicensing it under Apache 2 for Cranelift.

Co-authored-by: Julian Seward <jseward@acm.org>
Co-authored-by: Benjamin Bouvier <public@benj.me>
Co-authored-by: Joey Gouly <joey.gouly@arm.com>
Author: Chris Fallin
Date:   2020-04-09 12:36:21 -07:00
commit 548ce947bf (parent d83574261c)
6 changed files with 8149 additions and 1 deletion


@@ -0,0 +1,501 @@
//! ARM64 ISA definitions: instruction arguments.
#![allow(dead_code)]
#![allow(non_snake_case)]
use crate::binemit::{CodeOffset, CodeSink};
use crate::ir::constant::{ConstantData, ConstantOffset};
use crate::ir::Type;
use crate::isa::arm64::inst::*;
use crate::machinst::*;
use regalloc::{
RealReg, RealRegUniverse, Reg, RegClass, RegClassInfo, SpillSlot, VirtualReg, Writable,
NUM_REG_CLASSES,
};
use std::string::{String, ToString};
/// A shift operator for a register or immediate.
#[derive(Clone, Copy, Debug)]
pub enum ShiftOp {
ASR,
LSR,
LSL,
ROR,
}
impl ShiftOp {
/// Get the encoding of this shift op.
pub fn bits(&self) -> u8 {
match self {
&ShiftOp::LSL => 0b00,
&ShiftOp::LSR => 0b01,
&ShiftOp::ASR => 0b10,
&ShiftOp::ROR => 0b11,
}
}
}
/// A shift operator with an amount, guaranteed to be within range.
#[derive(Clone, Debug)]
pub struct ShiftOpAndAmt {
op: ShiftOp,
shift: ShiftOpShiftImm,
}
/// A shift operator amount.
#[derive(Clone, Copy, Debug)]
pub struct ShiftOpShiftImm(u8);
impl ShiftOpShiftImm {
/// Maximum shift for shifted-register operands.
pub const MAX_SHIFT: u64 = 63;
/// Create a new shiftop shift amount, if possible.
pub fn maybe_from_shift(shift: u64) -> Option<ShiftOpShiftImm> {
if shift <= Self::MAX_SHIFT {
Some(ShiftOpShiftImm(shift as u8))
} else {
None
}
}
/// Return the shift amount.
pub fn value(&self) -> u8 {
self.0
}
}
impl ShiftOpAndAmt {
pub fn new(op: ShiftOp, shift: ShiftOpShiftImm) -> ShiftOpAndAmt {
ShiftOpAndAmt { op, shift }
}
/// Get the shift op.
pub fn op(&self) -> ShiftOp {
self.op.clone()
}
/// Get the shift amount.
pub fn amt(&self) -> ShiftOpShiftImm {
self.shift
}
}
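// Editor's illustrative sketch (not part of the original patch): building a
// shifted-register operand such as the `LSL #3` in `add xd, xn, xm, LSL #3`.
// `maybe_from_shift` rejects amounts above MAX_SHIFT (63), so `expect` is
// safe for the constant 3.
#[cfg(test)]
#[test]
fn shiftop_and_amt_example() {
    let amt = ShiftOpShiftImm::maybe_from_shift(3).expect("3 <= MAX_SHIFT");
    let op = ShiftOpAndAmt::new(ShiftOp::LSL, amt);
    assert_eq!(op.amt().value(), 3);
    assert_eq!(op.op().bits(), 0b00); // LSL encodes as 0b00
    assert!(ShiftOpShiftImm::maybe_from_shift(64).is_none()); // out of range
}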
/// An extend operator for a register.
#[derive(Clone, Copy, Debug)]
pub enum ExtendOp {
SXTB,
SXTH,
SXTW,
SXTX,
UXTB,
UXTH,
UXTW,
UXTX,
}
impl ExtendOp {
/// Encoding of this op.
pub fn bits(&self) -> u8 {
match self {
&ExtendOp::UXTB => 0b000,
&ExtendOp::UXTH => 0b001,
&ExtendOp::UXTW => 0b010,
&ExtendOp::UXTX => 0b011,
&ExtendOp::SXTB => 0b100,
&ExtendOp::SXTH => 0b101,
&ExtendOp::SXTW => 0b110,
&ExtendOp::SXTX => 0b111,
}
}
}
//=============================================================================
// Instruction sub-components (memory addresses): definitions
/// A reference to some memory address.
#[derive(Clone, Debug)]
pub enum MemLabel {
/// An address in the code, a constant pool or jumptable, with relative
/// offset from this instruction. This form must be used at emission time;
/// see `memlabel_finalize()` for how other forms are lowered to this one.
PCRel(i32),
}
/// A memory argument to load/store, encapsulating the possible addressing modes.
#[derive(Clone, Debug)]
pub enum MemArg {
Label(MemLabel),
PostIndexed(Writable<Reg>, SImm9),
PreIndexed(Writable<Reg>, SImm9),
// N.B.: RegReg, RegScaled, and RegScaledExtended all correspond to
// what the ISA calls the "register offset" addressing mode. We split out
// several options here for more ergonomic codegen.
RegReg(Reg, Reg),
RegScaled(Reg, Reg, Type),
RegScaledExtended(Reg, Reg, Type, ExtendOp),
Unscaled(Reg, SImm9),
UnsignedOffset(Reg, UImm12Scaled),
/// Offset from the stack pointer.
SPOffset(i64),
/// Offset from the frame pointer.
FPOffset(i64),
}
impl MemArg {
/// Memory reference using an address in a register.
pub fn reg(reg: Reg) -> MemArg {
// Use UnsignedOffset rather than Unscaled to use ldr rather than ldur.
// This also does not use PostIndexed / PreIndexed as they update the register.
MemArg::UnsignedOffset(reg, UImm12Scaled::zero(I64))
}
/// Memory reference using an address in a register and an offset, if possible.
pub fn reg_maybe_offset(reg: Reg, offset: i64, value_type: Type) -> Option<MemArg> {
if offset == 0 {
Some(MemArg::Unscaled(reg, SImm9::zero()))
} else if let Some(simm9) = SImm9::maybe_from_i64(offset) {
Some(MemArg::Unscaled(reg, simm9))
} else if let Some(uimm12s) = UImm12Scaled::maybe_from_i64(offset, value_type) {
Some(MemArg::UnsignedOffset(reg, uimm12s))
} else {
None
}
}
/// Memory reference using the sum of two registers as an address.
pub fn reg_reg(reg1: Reg, reg2: Reg) -> MemArg {
MemArg::RegReg(reg1, reg2)
}
/// Memory reference using `reg1 + sizeof(ty) * reg2` as an address.
pub fn reg_reg_scaled(reg1: Reg, reg2: Reg, ty: Type) -> MemArg {
MemArg::RegScaled(reg1, reg2, ty)
}
/// Memory reference using `reg1 + sizeof(ty) * reg2` as an address.
pub fn reg_reg_scaled_extended(reg1: Reg, reg2: Reg, ty: Type, op: ExtendOp) -> MemArg {
MemArg::RegScaledExtended(reg1, reg2, ty, op)
}
/// Memory reference to a label: a global function or value, or data in the constant pool.
pub fn label(label: MemLabel) -> MemArg {
MemArg::Label(label)
}
}
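// Editor's illustrative sketch (not part of the original patch): how
// `reg_maybe_offset` selects an addressing mode. Assumes `xreg` from the
// registers module and `I64` from `crate::ir::types` are in scope via the
// glob imports above.
#[cfg(test)]
#[test]
fn memarg_selection_example() {
    // Small signed offsets fit SImm9, giving the unscaled (ldur/stur) form.
    assert!(matches!(
        MemArg::reg_maybe_offset(xreg(0), 8, I64),
        Some(MemArg::Unscaled(..))
    ));
    // Larger, 8-byte-aligned offsets use the scaled unsigned-offset form.
    assert!(matches!(
        MemArg::reg_maybe_offset(xreg(0), 4096, I64),
        Some(MemArg::UnsignedOffset(..))
    ));
    // Offsets out of range for both encodings cannot be represented directly.
    assert!(MemArg::reg_maybe_offset(xreg(0), 1i64 << 40, I64).is_none());
}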
/// A memory argument to a load/store-pair.
#[derive(Clone, Debug)]
pub enum PairMemArg {
SignedOffset(Reg, SImm7Scaled),
PreIndexed(Writable<Reg>, SImm7Scaled),
PostIndexed(Writable<Reg>, SImm7Scaled),
}
//=============================================================================
// Instruction sub-components (conditions, branches and branch targets):
// definitions
/// Condition for conditional branches.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Cond {
Eq,
Ne,
Hs,
Lo,
Mi,
Pl,
Vs,
Vc,
Hi,
Ls,
Ge,
Lt,
Gt,
Le,
Al,
Nv,
}
impl Cond {
/// Return the inverted condition.
pub fn invert(self) -> Cond {
match self {
Cond::Eq => Cond::Ne,
Cond::Ne => Cond::Eq,
Cond::Hs => Cond::Lo,
Cond::Lo => Cond::Hs,
Cond::Mi => Cond::Pl,
Cond::Pl => Cond::Mi,
Cond::Vs => Cond::Vc,
Cond::Vc => Cond::Vs,
Cond::Hi => Cond::Ls,
Cond::Ls => Cond::Hi,
Cond::Ge => Cond::Lt,
Cond::Lt => Cond::Ge,
Cond::Gt => Cond::Le,
Cond::Le => Cond::Gt,
Cond::Al => Cond::Nv,
Cond::Nv => Cond::Al,
}
}
/// Return the machine encoding of this condition.
pub fn bits(self) -> u32 {
match self {
Cond::Eq => 0,
Cond::Ne => 1,
Cond::Hs => 2,
Cond::Lo => 3,
Cond::Mi => 4,
Cond::Pl => 5,
Cond::Vs => 6,
Cond::Vc => 7,
Cond::Hi => 8,
Cond::Ls => 9,
Cond::Ge => 10,
Cond::Lt => 11,
Cond::Gt => 12,
Cond::Le => 13,
Cond::Al => 14,
Cond::Nv => 15,
}
}
}
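// Editor's note (not part of the original patch): in the AArch64 encoding, a
// condition's inverse differs from it only in the low bit, which the table
// above preserves. A small check of that invariant:
#[cfg(test)]
#[test]
fn cond_invert_example() {
    let conds = [
        Cond::Eq, Cond::Ne, Cond::Hs, Cond::Lo, Cond::Mi, Cond::Pl,
        Cond::Vs, Cond::Vc, Cond::Hi, Cond::Ls, Cond::Ge, Cond::Lt,
        Cond::Gt, Cond::Le, Cond::Al, Cond::Nv,
    ];
    for &c in &conds {
        assert_eq!(c.invert().bits(), c.bits() ^ 1);
        assert_eq!(c.invert().invert(), c);
    }
}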
/// The kind of conditional branch: the common-case-optimized "reg-is-zero" /
/// "reg-is-nonzero" variants, or the generic one that tests the machine
/// condition codes.
#[derive(Clone, Copy, Debug)]
pub enum CondBrKind {
/// Condition: given register is zero.
Zero(Reg),
/// Condition: given register is nonzero.
NotZero(Reg),
/// Condition: the given condition-code test is true.
Cond(Cond),
}
impl CondBrKind {
/// Return the inverted branch condition.
pub fn invert(self) -> CondBrKind {
match self {
CondBrKind::Zero(reg) => CondBrKind::NotZero(reg),
CondBrKind::NotZero(reg) => CondBrKind::Zero(reg),
CondBrKind::Cond(c) => CondBrKind::Cond(c.invert()),
}
}
}
/// A branch target. Either unresolved (basic-block index) or resolved (offset
/// from end of current instruction).
#[derive(Clone, Copy, Debug)]
pub enum BranchTarget {
/// An unresolved reference to a BlockIndex, as passed into
/// `lower_branch_group()`.
Block(BlockIndex),
/// A resolved reference to another instruction, after
/// `Inst::with_block_offsets()`.
ResolvedOffset(isize),
}
impl BranchTarget {
/// Lower the branch target given offsets of each block.
pub fn lower(&mut self, targets: &[CodeOffset], my_offset: CodeOffset) {
match self {
&mut BranchTarget::Block(bix) => {
let bix = bix as usize;
assert!(bix < targets.len());
let block_offset_in_func = targets[bix];
let branch_offset = (block_offset_in_func as isize) - (my_offset as isize);
*self = BranchTarget::ResolvedOffset(branch_offset);
}
&mut BranchTarget::ResolvedOffset(..) => {}
}
}
/// Get the block index.
pub fn as_block_index(&self) -> Option<BlockIndex> {
match self {
&BranchTarget::Block(bix) => Some(bix),
_ => None,
}
}
/// Get the offset in units of 4-byte instruction words. Returns `0` if
/// not yet resolved (in that case, we're only computing size and the
/// offset doesn't matter).
pub fn as_offset_words(&self) -> isize {
match self {
&BranchTarget::ResolvedOffset(off) => off >> 2,
_ => 0,
}
}
/// Get the offset as a 26-bit offset suitable for a 26-bit jump, or `None` if overflow.
pub fn as_off26(&self) -> Option<u32> {
let off = self.as_offset_words();
if (off < (1 << 25)) && (off >= -(1 << 25)) {
Some((off as u32) & ((1 << 26) - 1))
} else {
None
}
}
/// Get the offset as a 19-bit offset suitable for a conditional branch, or `None` if overflow.
pub fn as_off19(&self) -> Option<u32> {
let off = self.as_offset_words();
if (off < (1 << 18)) && (off >= -(1 << 18)) {
Some((off as u32) & ((1 << 19) - 1))
} else {
None
}
}
/// Map the block index given a transform map.
pub fn map(&mut self, block_index_map: &[BlockIndex]) {
match self {
&mut BranchTarget::Block(ref mut bix) => {
let n = block_index_map[*bix as usize];
*bix = n;
}
&mut BranchTarget::ResolvedOffset(_) => {}
}
}
}
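// Editor's illustrative sketch (not part of the original patch): resolving a
// block target into a word offset and extracting the 26-bit field used by
// unconditional branches.
#[cfg(test)]
#[test]
fn branch_target_example() {
    // Block 1 starts at byte offset 16; the branch instruction is at byte 4.
    let targets: &[CodeOffset] = &[0, 16];
    let mut t = BranchTarget::Block(1);
    t.lower(targets, 4);
    assert_eq!(t.as_offset_words(), 3); // (16 - 4) bytes = 3 instruction words
    assert_eq!(t.as_off26(), Some(3)); // well within the signed 26-bit range
}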
impl ShowWithRRU for ShiftOpAndAmt {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("{:?} {}", self.op(), self.amt().value())
}
}
impl ShowWithRRU for ExtendOp {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("{:?}", self)
}
}
impl ShowWithRRU for MemLabel {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&MemLabel::PCRel(off) => format!("pc+{}", off),
}
}
}
fn shift_for_type(ty: Type) -> usize {
match ty.bytes() {
1 => 0,
2 => 1,
4 => 2,
8 => 3,
16 => 4,
_ => panic!("unknown type"),
}
}
impl ShowWithRRU for MemArg {
fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&MemArg::Unscaled(reg, simm9) => {
if simm9.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), simm9.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&MemArg::UnsignedOffset(reg, uimm12) => {
if uimm12.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), uimm12.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&MemArg::RegReg(r1, r2) => {
format!("[{}, {}]", r1.show_rru(mb_rru), r2.show_rru(mb_rru))
}
&MemArg::RegScaled(r1, r2, ty) => {
let shift = shift_for_type(ty);
format!(
"[{}, {}, LSL #{}]",
r1.show_rru(mb_rru),
r2.show_rru(mb_rru),
shift,
)
}
&MemArg::RegScaledExtended(r1, r2, ty, op) => {
let shift = shift_for_type(ty);
let is32 = match op {
ExtendOp::SXTW | ExtendOp::UXTW => true,
_ => false,
};
let op = op.show_rru(mb_rru);
format!(
"[{}, {}, {} #{}]",
r1.show_rru(mb_rru),
show_ireg_sized(r2, mb_rru, is32),
op,
shift
)
}
&MemArg::Label(ref label) => label.show_rru(mb_rru),
&MemArg::PreIndexed(r, simm9) => format!(
"[{}, {}]!",
r.to_reg().show_rru(mb_rru),
simm9.show_rru(mb_rru)
),
&MemArg::PostIndexed(r, simm9) => format!(
"[{}], {}",
r.to_reg().show_rru(mb_rru),
simm9.show_rru(mb_rru)
),
// Eliminated by `mem_finalize()`.
&MemArg::SPOffset(..) | &MemArg::FPOffset(..) => {
panic!("Unexpected stack-offset mem-arg mode!")
}
}
}
}
impl ShowWithRRU for PairMemArg {
fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&PairMemArg::SignedOffset(reg, simm7) => {
if simm7.value != 0 {
format!("[{}, {}]", reg.show_rru(mb_rru), simm7.show_rru(mb_rru))
} else {
format!("[{}]", reg.show_rru(mb_rru))
}
}
&PairMemArg::PreIndexed(reg, simm7) => format!(
"[{}, {}]!",
reg.to_reg().show_rru(mb_rru),
simm7.show_rru(mb_rru)
),
&PairMemArg::PostIndexed(reg, simm7) => format!(
"[{}], {}",
reg.to_reg().show_rru(mb_rru),
simm7.show_rru(mb_rru)
),
}
}
}
impl ShowWithRRU for Cond {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
let mut s = format!("{:?}", self);
s.make_ascii_lowercase();
s
}
}
impl ShowWithRRU for BranchTarget {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
match self {
&BranchTarget::Block(block) => format!("block{}", block),
&BranchTarget::ResolvedOffset(off) => format!("{}", off),
}
}
}

(File diff suppressed because it is too large.)


@@ -0,0 +1,753 @@
//! ARM64 ISA definitions: immediate constants.
#![allow(dead_code)]
#![allow(non_snake_case)]
use crate::ir::types::*;
use crate::ir::Type;
use crate::machinst::*;
use regalloc::RealRegUniverse;
use core::convert::TryFrom;
use std::string::String;
/// A signed, scaled 7-bit offset.
#[derive(Clone, Copy, Debug)]
pub struct SImm7Scaled {
/// The value.
pub value: i16,
/// The offset is scaled by (and must be a multiple of) the size of this type.
pub scale_ty: Type,
}
impl SImm7Scaled {
/// Create a SImm7Scaled from a raw offset and the known scale type, if
/// possible.
pub fn maybe_from_i64(value: i64, scale_ty: Type) -> Option<SImm7Scaled> {
assert!(scale_ty == I64 || scale_ty == I32);
let scale = scale_ty.bytes();
assert!(scale.is_power_of_two());
let scale = scale as i64;
let upper_limit = 63 * scale;
let lower_limit = -(64 * scale);
if value >= lower_limit && value <= upper_limit && (value & (scale - 1)) == 0 {
Some(SImm7Scaled {
value: value as i16,
scale_ty,
})
} else {
None
}
}
/// Create a zero immediate of this format.
pub fn zero(scale_ty: Type) -> SImm7Scaled {
SImm7Scaled { value: 0, scale_ty }
}
/// Bits for encoding.
pub fn bits(&self) -> u32 {
((self.value / self.scale_ty.bytes() as i16) as u32) & 0x7f
}
}
/// A signed 9-bit offset.
#[derive(Clone, Copy, Debug)]
pub struct SImm9 {
/// The value.
pub value: i16,
}
impl SImm9 {
/// Create a signed 9-bit offset from a full-range value, if possible.
pub fn maybe_from_i64(value: i64) -> Option<SImm9> {
if value >= -256 && value <= 255 {
Some(SImm9 {
value: value as i16,
})
} else {
None
}
}
/// Create a zero immediate of this format.
pub fn zero() -> SImm9 {
SImm9 { value: 0 }
}
/// Bits for encoding.
pub fn bits(&self) -> u32 {
(self.value as u32) & 0x1ff
}
}
/// An unsigned, scaled 12-bit offset.
#[derive(Clone, Copy, Debug)]
pub struct UImm12Scaled {
/// The value.
pub value: u16,
/// The offset is scaled by (and must be a multiple of) the size of this type.
pub scale_ty: Type,
}
impl UImm12Scaled {
/// Create a UImm12Scaled from a raw offset and the known scale type, if
/// possible.
pub fn maybe_from_i64(value: i64, scale_ty: Type) -> Option<UImm12Scaled> {
let scale = scale_ty.bytes();
assert!(scale.is_power_of_two());
let scale = scale as i64;
let limit = 4095 * scale;
if value >= 0 && value <= limit && (value & (scale - 1)) == 0 {
Some(UImm12Scaled {
value: value as u16,
scale_ty,
})
} else {
None
}
}
/// Create a zero immediate of this format.
pub fn zero(scale_ty: Type) -> UImm12Scaled {
UImm12Scaled { value: 0, scale_ty }
}
/// Encoded bits.
pub fn bits(&self) -> u32 {
(self.value as u32 / self.scale_ty.bytes()) & 0xfff
}
}
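// Editor's illustrative sketch (not part of the original patch): scaled
// immediates store the byte offset divided by the access size, so a byte
// offset of 24 with an 8-byte (I64) scale encodes as 3 in the 12-bit field,
// and unaligned offsets are rejected rather than rounded.
#[cfg(test)]
#[test]
fn uimm12_scaled_example() {
    let imm = UImm12Scaled::maybe_from_i64(24, I64).expect("8-aligned and in range");
    assert_eq!(imm.bits(), 3);
    assert!(UImm12Scaled::maybe_from_i64(25, I64).is_none()); // not 8-aligned
}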
/// A shifted immediate value in 'imm12' format: supports 12 bits, shifted
/// left by 0 or 12 places.
#[derive(Clone, Debug)]
pub struct Imm12 {
/// The immediate bits.
pub bits: usize,
/// Whether the immediate bits are shifted left by 12 or not.
pub shift12: bool,
}
impl Imm12 {
/// Compute a Imm12 from raw bits, if possible.
pub fn maybe_from_u64(val: u64) -> Option<Imm12> {
if val == 0 {
Some(Imm12 {
bits: 0,
shift12: false,
})
} else if val <= 0xfff {
Some(Imm12 {
bits: val as usize,
shift12: false,
})
} else if val <= 0xfff_000 && (val & 0xfff == 0) {
Some(Imm12 {
bits: (val as usize) >> 12,
shift12: true,
})
} else {
None
}
}
/// Bits for 2-bit "shift" field in e.g. AddI.
pub fn shift_bits(&self) -> u8 {
if self.shift12 {
0b01
} else {
0b00
}
}
/// Bits for 12-bit "imm" field in e.g. AddI.
pub fn imm_bits(&self) -> u16 {
self.bits as u16
}
}
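// Editor's illustrative sketch (not part of the original patch): an
// arithmetic immediate is 12 bits optionally shifted left by 12, so 0x45000
// encodes as 0x45 with the shift flag set, while 0x1001 (set bits both above
// and below bit 12) is not encodable.
#[cfg(test)]
#[test]
fn imm12_example() {
    let imm = Imm12::maybe_from_u64(0x45_000).expect("low 12 bits clear");
    assert_eq!(imm.imm_bits(), 0x45);
    assert_eq!(imm.shift_bits(), 0b01);
    assert!(Imm12::maybe_from_u64(0x1001).is_none());
}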
/// An immediate for logical instructions.
#[derive(Clone, Debug)]
#[cfg_attr(test, derive(PartialEq))]
pub struct ImmLogic {
/// The actual value.
value: u64,
/// `N` flag.
pub N: bool,
/// `R` field: rotate amount.
pub R: u8,
/// `S` field: element size and element bits.
pub S: u8,
}
impl ImmLogic {
/// Compute an ImmLogic from raw bits, if possible.
pub fn maybe_from_u64(value: u64, ty: Type) -> Option<ImmLogic> {
// Note: This function is a port of VIXL's Assembler::IsImmLogical.
if ty != I64 && ty != I32 {
return None;
}
let original_value = value;
let value = if ty == I32 {
// To handle 32-bit logical immediates, the very easiest thing is to repeat
// the input value twice to make a 64-bit word. The correct encoding of that
// as a logical immediate will also be the correct encoding of the 32-bit
// value.
// Avoid making the assumption that the most-significant 32 bits are zero by
// shifting the value left and duplicating it.
let value = value << 32;
value | value >> 32
} else {
value
};
// Logical immediates are encoded using parameters n, imm_s and imm_r using
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1 bits
// are set. The pattern is rotated right by R, and repeated across a 32 or
// 64-bit value, depending on destination register width.
//
// Put another way: the basic format of a logical immediate is a single
// contiguous stretch of 1 bits, repeated across the whole word at intervals
// given by a power of 2. To identify them quickly, we first locate the
// lowest stretch of 1 bits, then the next 1 bit above that; that combination
// is different for every logical immediate, so it gives us all the
// information we need to identify the only logical immediate that our input
// could be, and then we simply check if that's the value we actually have.
//
// (The rotation parameter does give the possibility of the stretch of 1 bits
// going 'round the end' of the word. To deal with that, we observe that in
// any situation where that happens the bitwise NOT of the value is also a
// valid logical immediate. So we simply invert the input whenever its low bit
// is set, and then we know that the rotated case can't arise.)
let (value, inverted) = if value & 1 == 1 {
(!value, true)
} else {
(value, false)
};
if value == 0 {
return None;
}
// The basic analysis idea: imagine our input word looks like this.
//
// 0011111000111110001111100011111000111110001111100011111000111110
// c b a
// |<--d-->|
//
// We find the lowest set bit (as an actual power-of-2 value, not its index)
// and call it a. Then we add a to our original number, which wipes out the
// bottommost stretch of set bits and replaces it with a 1 carried into the
// next zero bit. Then we look for the new lowest set bit, which is in
// position b, and subtract it, so now our number is just like the original
// but with the lowest stretch of set bits completely gone. Now we find the
// lowest set bit again, which is position c in the diagram above. Then we'll
// measure the distance d between bit positions a and c (using CLZ), and that
// tells us that the only valid logical immediate that could possibly be equal
// to this number is the one in which a stretch of bits running from a to just
// below b is replicated every d bits.
fn lowest_set_bit(value: u64) -> u64 {
let bit = value.trailing_zeros();
1u64.checked_shl(bit).unwrap_or(0)
}
let a = lowest_set_bit(value);
assert_ne!(0, a);
let value_plus_a = value.wrapping_add(a);
let b = lowest_set_bit(value_plus_a);
let value_plus_a_minus_b = value_plus_a - b;
let c = lowest_set_bit(value_plus_a_minus_b);
let (d, clz_a, out_n, mask) = if c != 0 {
// The general case, in which there is more than one stretch of set bits.
// Compute the repeat distance d, and set up a bitmask covering the basic
// unit of repetition (i.e. a word with the bottom d bits set). Also, in all
// of these cases the N bit of the output will be zero.
let clz_a = a.leading_zeros();
let clz_c = c.leading_zeros();
let d = clz_a - clz_c;
let mask = (1 << d) - 1;
(d, clz_a, 0, mask)
} else {
(64, a.leading_zeros(), 1, u64::max_value())
};
// If the repeat period d is not a power of two, it can't be encoded.
if !d.is_power_of_two() {
return None;
}
if ((b.wrapping_sub(a)) & !mask) != 0 {
// If the bit stretch (b - a) does not fit within the mask derived from the
// repeat period, then fail.
return None;
}
// The only possible option is b - a repeated every d bits. Now we're going to
// actually construct the valid logical immediate derived from that
// specification, and see if it equals our original input.
//
// To repeat a value every d bits, we multiply it by a number of the form
// (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
// be derived using a table lookup on CLZ(d).
const MULTIPLIERS: [u64; 6] = [
0x0000000000000001,
0x0000000100000001,
0x0001000100010001,
0x0101010101010101,
0x1111111111111111,
0x5555555555555555,
];
let multiplier = MULTIPLIERS[(u64::from(d).leading_zeros() - 57) as usize];
let candidate = b.wrapping_sub(a) * multiplier;
if value != candidate {
// The candidate pattern doesn't match our input value, so fail.
return None;
}
// We have a match! This is a valid logical immediate, so now we have to
// construct the bits and pieces of the instruction encoding that generates
// it.
// Count the set bits in our basic stretch. The special case of clz(0) == -1
// makes the answer come out right for stretches that reach the very top of
// the word (e.g. numbers like 0xffffc00000000000).
let clz_b = if b == 0 {
u32::max_value() // -1
} else {
b.leading_zeros()
};
let s = clz_a.wrapping_sub(clz_b);
// Decide how many bits to rotate right by, to put the low bit of that basic
// stretch in position a.
let (s, r) = if inverted {
// If we inverted the input right at the start of this function, here's
// where we compensate: the number of set bits becomes the number of clear
// bits, and the rotation count is based on position b rather than position
// a (since b is the location of the 'lowest' 1 bit after inversion).
// Need wrapping for when clz_b is max_value() (for when b == 0).
(d - s, clz_b.wrapping_add(1) & (d - 1))
} else {
(s, (clz_a + 1) & (d - 1))
};
// Now we're done, except for having to encode the S output in such a way that
// it gives both the number of set bits and the length of the repeated
// segment. The s field is encoded like this:
//
// imms size S
// ssssss 64 UInt(ssssss)
// 0sssss 32 UInt(sssss)
// 10ssss 16 UInt(ssss)
// 110sss 8 UInt(sss)
// 1110ss 4 UInt(ss)
// 11110s 2 UInt(s)
//
// So we 'or' (2 * -d) with our computed s to form imms.
let s = ((d * 2).wrapping_neg() | (s - 1)) & 0x3f;
debug_assert!(u8::try_from(r).is_ok());
debug_assert!(u8::try_from(s).is_ok());
Some(ImmLogic {
value: original_value,
N: out_n != 0,
R: r as u8,
S: s as u8,
})
}
pub fn from_raw(value: u64, n: bool, r: u8, s: u8) -> ImmLogic {
ImmLogic {
N: n,
R: r,
S: s,
value,
}
}
/// Returns bits ready for encoding: (N:1, R:6, S:6)
pub fn enc_bits(&self) -> u16 {
((self.N as u16) << 12) | ((self.R as u16) << 6) | (self.S as u16)
}
/// Returns the value that this immediate represents.
pub fn value(&self) -> u64 {
self.value
}
/// Return an immediate for the bitwise-inverted value.
pub fn invert(&self) -> ImmLogic {
// For every valid ImmLogic immediate, the bitwise-inverted value is also encodable.
Self::maybe_from_u64(!self.value, I64).unwrap()
}
}
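// Editor's worked example (not part of the original patch): the value
// 0x00ff00ff00ff00ff is eight set bits repeated every 16 bits, so the repeat
// period d is 16, R is 0 (no rotation), and S encodes "16-bit element, eight
// bits set" as 0b100111 (decimal 39). N is 0 because the element is smaller
// than 64 bits.
#[cfg(test)]
#[test]
fn imm_logic_worked_example() {
    let imm = ImmLogic::maybe_from_u64(0x00ff_00ff_00ff_00ff, I64).expect("repeating pattern");
    assert_eq!((imm.N, imm.R, imm.S), (false, 0, 39));
    assert_eq!(imm.value(), 0x00ff_00ff_00ff_00ff);
}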
/// An immediate for shift instructions.
#[derive(Clone, Debug)]
pub struct ImmShift {
/// 6-bit shift amount.
pub imm: u8,
}
impl ImmShift {
/// Create an ImmShift from raw bits, if possible.
pub fn maybe_from_u64(val: u64) -> Option<ImmShift> {
if val < 64 {
Some(ImmShift { imm: val as u8 })
} else {
None
}
}
/// Get the immediate value.
pub fn value(&self) -> u8 {
self.imm
}
}
/// A 16-bit immediate for a MOVZ instruction, with a {0,16,32,48}-bit shift.
#[derive(Clone, Copy, Debug)]
pub struct MoveWideConst {
/// The 16-bit payload.
pub bits: u16,
/// The left-shift amount, in units of 16 bits (0 to 3).
pub shift: u8,
}
impl MoveWideConst {
/// Construct a MoveWideConst from an arbitrary 64-bit constant if possible.
pub fn maybe_from_u64(value: u64) -> Option<MoveWideConst> {
let mask0 = 0x0000_0000_0000_ffffu64;
let mask1 = 0x0000_0000_ffff_0000u64;
let mask2 = 0x0000_ffff_0000_0000u64;
let mask3 = 0xffff_0000_0000_0000u64;
if value == (value & mask0) {
return Some(MoveWideConst {
bits: (value & mask0) as u16,
shift: 0,
});
}
if value == (value & mask1) {
return Some(MoveWideConst {
bits: ((value >> 16) & mask0) as u16,
shift: 1,
});
}
if value == (value & mask2) {
return Some(MoveWideConst {
bits: ((value >> 32) & mask0) as u16,
shift: 2,
});
}
if value == (value & mask3) {
return Some(MoveWideConst {
bits: ((value >> 48) & mask0) as u16,
shift: 3,
});
}
None
}
/// Create a MoveWideConst from a 16-bit payload and a shift amount in bits
/// (one of 0, 16, 32, 48), if possible.
pub fn maybe_with_shift(imm: u16, shift: u8) -> Option<MoveWideConst> {
let shift_enc = shift / 16;
// Reject shifts that are out of range or not a multiple of 16.
if shift_enc > 3 || shift % 16 != 0 {
None
} else {
Some(MoveWideConst {
bits: imm,
shift: shift_enc,
})
}
}
/// Returns the value that this constant represents.
pub fn value(&self) -> u64 {
(self.bits as u64) << (16 * self.shift)
}
}
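// Editor's illustrative sketch (not part of the original patch): a move-wide
// immediate is a single 16-bit chunk at a 16-bit-aligned position; values
// with more than one nonzero chunk are rejected.
#[cfg(test)]
#[test]
fn move_wide_const_example() {
    let c = MoveWideConst::maybe_from_u64(0x0012_0000_0000).expect("one 16-bit chunk");
    assert_eq!((c.bits, c.shift), (0x12, 2));
    assert_eq!(c.value(), 0x0012_0000_0000);
    assert!(MoveWideConst::maybe_from_u64(0x1_0001).is_none()); // two chunks
}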
impl ShowWithRRU for Imm12 {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
let shift = if self.shift12 { 12 } else { 0 };
let value = self.bits << shift;
format!("#{}", value)
}
}
impl ShowWithRRU for SImm7Scaled {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("#{}", self.value)
}
}
impl ShowWithRRU for SImm9 {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("#{}", self.value)
}
}
impl ShowWithRRU for UImm12Scaled {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("#{}", self.value)
}
}
impl ShowWithRRU for ImmLogic {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("#{}", self.value())
}
}
impl ShowWithRRU for ImmShift {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
format!("#{}", self.imm)
}
}
impl ShowWithRRU for MoveWideConst {
fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
if self.shift == 0 {
format!("#{}", self.bits)
} else {
format!("#{}, LSL #{}", self.bits, self.shift * 16)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn imm_logical_test() {
assert_eq!(None, ImmLogic::maybe_from_u64(0, I64));
assert_eq!(None, ImmLogic::maybe_from_u64(u64::max_value(), I64));
assert_eq!(
Some(ImmLogic {
value: 1,
N: true,
R: 0,
S: 0
}),
ImmLogic::maybe_from_u64(1, I64)
);
assert_eq!(
Some(ImmLogic {
value: 2,
N: true,
R: 63,
S: 0
}),
ImmLogic::maybe_from_u64(2, I64)
);
assert_eq!(None, ImmLogic::maybe_from_u64(5, I64));
assert_eq!(None, ImmLogic::maybe_from_u64(11, I64));
assert_eq!(
Some(ImmLogic {
value: 248,
N: true,
R: 61,
S: 4
}),
ImmLogic::maybe_from_u64(248, I64)
);
assert_eq!(None, ImmLogic::maybe_from_u64(249, I64));
assert_eq!(
Some(ImmLogic {
value: 1920,
N: true,
R: 57,
S: 3
}),
ImmLogic::maybe_from_u64(1920, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x7ffe,
N: true,
R: 63,
S: 13
}),
ImmLogic::maybe_from_u64(0x7ffe, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x30000,
N: true,
R: 48,
S: 1
}),
ImmLogic::maybe_from_u64(0x30000, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x100000,
N: true,
R: 44,
S: 0
}),
ImmLogic::maybe_from_u64(0x100000, I64)
);
assert_eq!(
Some(ImmLogic {
value: u64::max_value() - 1,
N: true,
R: 63,
S: 62
}),
ImmLogic::maybe_from_u64(u64::max_value() - 1, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0xaaaaaaaaaaaaaaaa,
N: false,
R: 1,
S: 60
}),
ImmLogic::maybe_from_u64(0xaaaaaaaaaaaaaaaa, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x8181818181818181,
N: false,
R: 1,
S: 49
}),
ImmLogic::maybe_from_u64(0x8181818181818181, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0xffc3ffc3ffc3ffc3,
N: false,
R: 10,
S: 43
}),
ImmLogic::maybe_from_u64(0xffc3ffc3ffc3ffc3, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x100000001,
N: false,
R: 0,
S: 0
}),
ImmLogic::maybe_from_u64(0x100000001, I64)
);
assert_eq!(
Some(ImmLogic {
value: 0x1111111111111111,
N: false,
R: 0,
S: 56
}),
ImmLogic::maybe_from_u64(0x1111111111111111, I64)
);
for n in 0..2 {
let types = if n == 0 { vec![I64, I32] } else { vec![I64] };
for s in 0..64 {
for r in 0..64 {
let imm = get_logical_imm(n, s, r);
for &ty in &types {
match ImmLogic::maybe_from_u64(imm, ty) {
Some(ImmLogic { value, .. }) => {
assert_eq!(imm, value);
ImmLogic::maybe_from_u64(!value, ty).unwrap();
}
None => assert_eq!(0, imm),
};
}
}
}
}
}
// Repeat a value that has `width` bits, across a 64-bit value.
fn repeat(value: u64, width: u64) -> u64 {
let mut result = value & ((1 << width) - 1);
let mut i = width;
while i < 64 {
result |= result << i;
i *= 2;
}
result
}
// Get the logical immediate, from the encoding N/R/S bits.
fn get_logical_imm(n: u32, s: u32, r: u32) -> u64 {
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 64-bit value.
if n == 1 {
if s == 0x3f {
return 0;
}
let bits = (1u64 << (s + 1)) - 1;
bits.rotate_right(r)
} else {
if (s >> 1) == 0x1f {
return 0;
}
let mut width = 0x20;
while width >= 0x2 {
if (s & width) == 0 {
let mask = width - 1;
if (s & mask) == mask {
return 0;
}
let bits = (1u64 << ((s & mask) + 1)) - 1;
return repeat(bits.rotate_right(r & mask), width.into());
}
width >>= 1;
}
unreachable!();
}
}
}

(File diff suppressed because it is too large.)


@@ -0,0 +1,273 @@
//! ARM64 ISA definitions: registers.
#![allow(dead_code)]
use crate::machinst::*;
use regalloc::{
RealReg, RealRegUniverse, Reg, RegClass, RegClassInfo, SpillSlot, VirtualReg, Writable,
NUM_REG_CLASSES,
};
use std::string::{String, ToString};
//=============================================================================
// Registers, the Universe thereof, and printing
#[rustfmt::skip]
const XREG_INDICES: [u8; 31] = [
// X0 - X7
32, 33, 34, 35, 36, 37, 38, 39,
// X8 - X14
40, 41, 42, 43, 44, 45, 46,
// X15
59,
// X16, X17
47, 48,
// X18
60,
// X19 - X28
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
// X29
61,
// X30
62,
];
const ZERO_REG_INDEX: u8 = 63;
const SP_REG_INDEX: u8 = 64;
/// Get a reference to an X-register (integer register).
pub fn xreg(num: u8) -> Reg {
assert!(num < 31);
Reg::new_real(
RegClass::I64,
/* enc = */ num,
/* index = */ XREG_INDICES[num as usize],
)
}
/// Get a writable reference to an X-register.
pub fn writable_xreg(num: u8) -> Writable<Reg> {
Writable::from_reg(xreg(num))
}
/// Get a reference to a V-register (vector/FP register).
pub fn vreg(num: u8) -> Reg {
assert!(num < 32);
Reg::new_real(RegClass::V128, /* enc = */ num, /* index = */ num)
}
/// Get a writable reference to a V-register.
pub fn writable_vreg(num: u8) -> Writable<Reg> {
Writable::from_reg(vreg(num))
}
/// Get a reference to the zero-register.
pub fn zero_reg() -> Reg {
// This should be the same as what xreg(31) returns, except that
// we use the special index into the register index space.
Reg::new_real(
RegClass::I64,
/* enc = */ 31,
/* index = */ ZERO_REG_INDEX,
)
}
/// Get a writable reference to the zero-register (this discards a result).
pub fn writable_zero_reg() -> Writable<Reg> {
Writable::from_reg(zero_reg())
}
/// Get a reference to the stack-pointer register.
pub fn stack_reg() -> Reg {
// XSP (stack) and XZR (zero) are logically different registers which have
// the same hardware encoding, and whose meaning, in real arm64
// instructions, is context-dependent. For convenience of
// universe-construction and for correct printing, we make them be two
// different real registers.
Reg::new_real(
RegClass::I64,
/* enc = */ 31,
/* index = */ SP_REG_INDEX,
)
}
/// Get a writable reference to the stack-pointer register.
pub fn writable_stack_reg() -> Writable<Reg> {
Writable::from_reg(stack_reg())
}
/// Get a reference to the link register (x30).
pub fn link_reg() -> Reg {
xreg(30)
}
/// Get a writable reference to the link register.
pub fn writable_link_reg() -> Writable<Reg> {
Writable::from_reg(link_reg())
}
/// Get a reference to the frame pointer (x29).
pub fn fp_reg() -> Reg {
xreg(29)
}
/// Get a writable reference to the frame pointer.
pub fn writable_fp_reg() -> Writable<Reg> {
Writable::from_reg(fp_reg())
}
/// Get a reference to the "spill temp" register. This register is used to
/// compute the address of a spill slot when a direct offset addressing mode from
/// FP is not sufficient (+/- 2^11 words). We exclude this register from regalloc
/// and reserve it for this purpose for simplicity; otherwise we need a
/// multi-stage analysis where we first determine how many spill slots we have,
/// then perhaps remove the reg from the pool and recompute regalloc.
pub fn spilltmp_reg() -> Reg {
xreg(15)
}
/// Get a writable reference to the spilltmp reg.
pub fn writable_spilltmp_reg() -> Writable<Reg> {
Writable::from_reg(spilltmp_reg())
}
/// Create the register universe for ARM64.
pub fn create_reg_universe() -> RealRegUniverse {
let mut regs = vec![];
let mut allocable_by_class = [None; NUM_REG_CLASSES];
// Numbering Scheme: we put V-regs first, then X-regs. The X-regs
// exclude several registers: x15 (reserved as the spill temporary; see
// spilltmp_reg()), x18 (globally reserved for platform-specific purposes),
// x29 (frame pointer), x30 (link register), and x31 (stack pointer or zero
// register, depending on context).
let v_reg_base = 0u8; // in contiguous real-register index space
let v_reg_count = 32;
for i in 0u8..v_reg_count {
let reg = Reg::new_real(
RegClass::V128,
/* enc = */ i,
/* index = */ v_reg_base + i,
)
.to_real_reg();
let name = format!("v{}", i);
regs.push((reg, name));
}
let v_reg_last = v_reg_base + v_reg_count - 1;
// Add the X registers. N.B.: the order here must match the order implied
// by XREG_INDICES, ZERO_REG_INDEX, and SP_REG_INDEX above.
let x_reg_base = 32u8; // in contiguous real-register index space
let mut x_reg_count = 0;
for i in 0u8..32u8 {
// See above for excluded registers.
if i == 15 || i == 18 || i == 29 || i == 30 || i == 31 {
continue;
}
let reg = Reg::new_real(
RegClass::I64,
/* enc = */ i,
/* index = */ x_reg_base + x_reg_count,
)
.to_real_reg();
let name = format!("x{}", i);
regs.push((reg, name));
x_reg_count += 1;
}
let x_reg_last = x_reg_base + x_reg_count - 1;
allocable_by_class[RegClass::I64.rc_to_usize()] = Some(RegClassInfo {
first: x_reg_base as usize,
last: x_reg_last as usize,
suggested_scratch: Some(XREG_INDICES[13] as usize),
});
allocable_by_class[RegClass::V128.rc_to_usize()] = Some(RegClassInfo {
first: v_reg_base as usize,
last: v_reg_last as usize,
suggested_scratch: Some(/* V31: */ 31),
});
// Other regs, not available to the allocator.
let allocable = regs.len();
regs.push((xreg(15).to_real_reg(), "x15".to_string()));
regs.push((xreg(18).to_real_reg(), "x18".to_string()));
regs.push((fp_reg().to_real_reg(), "fp".to_string()));
regs.push((link_reg().to_real_reg(), "lr".to_string()));
regs.push((zero_reg().to_real_reg(), "xzr".to_string()));
regs.push((stack_reg().to_real_reg(), "sp".to_string()));
// FIXME JRS 2020Feb06: unfortunately this pushes the number of real regs
// to 65, which is potentially inconvenient from a compiler performance
// standpoint. We could possibly drop back to 64 by "losing" a vector
// register in future.
// Assert sanity: the indices in the register structs must match their
// actual indices in the array.
for (i, reg) in regs.iter().enumerate() {
assert_eq!(i, reg.0.get_index());
}
RealRegUniverse {
regs,
allocable,
allocable_by_class,
}
}
/// If |ireg| denotes an I64-classed reg, make a best-effort attempt to show
/// its name at the 32-bit size.
pub fn show_ireg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, is32: bool) -> String {
let mut s = reg.show_rru(mb_rru);
if reg.get_class() != RegClass::I64 || !is32 {
// We can't do any better.
return s;
}
if reg.is_real() {
// Change (eg) "x42" into "w42"; the early return above guarantees an
// I64-classed register in a 32-bit role.
if s.starts_with("x") {
s = "w".to_string() + &s[1..];
}
} else {
// Add a "w" suffix to I64-classed vregs used in a 32-bit role.
s.push('w');
}
s
}
/// Show a vector register when its use as a 32-bit or 64-bit float is known.
pub fn show_freg_sized(reg: Reg, mb_rru: Option<&RealRegUniverse>, is32: bool) -> String {
let s = reg.show_rru(mb_rru);
if reg.get_class() != RegClass::V128 {
return s;
}
let prefix = if is32 { "s" } else { "d" };
prefix.to_string() + &s[1..]
}
/// Show a vector register used in a scalar context.
pub fn show_vreg_scalar(reg: Reg, mb_rru: Option<&RealRegUniverse>) -> String {
let mut s = reg.show_rru(mb_rru);
if reg.get_class() != RegClass::V128 {
// We can't do any better.
return s;
}
if reg.is_real() {
// Change (eg) "v0" into "d0"; the early return above guarantees a
// V128-classed register.
if s.starts_with("v") {
s = "d".to_string() + &s[1..];
}
} else {
// Add a "d" suffix to V128-classed vregs used as scalars.
s.push('d');
}
s
}
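// Editor's illustrative sketch (not part of the original patch): how the
// sized-name helpers above rewrite a real register's print name. Assumes
// `show_rru` is available on `Reg` via the `ShowWithRRU` trait re-exported
// through `crate::machinst::*`.
#[cfg(test)]
#[test]
fn reg_name_example() {
    let rru = create_reg_universe();
    assert_eq!(xreg(0).show_rru(Some(&rru)), "x0");
    assert_eq!(show_ireg_sized(xreg(0), Some(&rru), /* is32 = */ true), "w0");
    assert_eq!(show_vreg_scalar(vreg(3), Some(&rru)), "d3");
}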


@@ -1 +1 @@
// Empty.
mod inst;