Multi-register value support: framework for Values wider than machine regs.

This will allow support for `I128` values everywhere, and `I64`
values on 32-bit targets (e.g., ARM32 and x86-32). It does not alter the
machine backends to build such support; it just adds the framework for
the MachInst backends to *reason* about a `Value` residing in more than
one register.
Chris Fallin
2020-12-12 20:48:56 -08:00
parent 6317290a1d
commit 6eea015d6c
18 changed files with 1024 additions and 561 deletions
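To make the framework concrete before the diff itself: the new `ValueRegs` type (added in `machinst/valueregs.rs` below) maps one SSA `Value` to one or more registers. Here is a minimal, self-contained distillation of the idea, using `u32` stand-ins for regalloc's register types (the real type is generic and uses `Reg::invalid()` as its in-band sentinel):

```rust
// Distilled sketch of ValueRegs; u32 stands in for a register.
const INVALID: u32 = u32::MAX;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ValueRegs {
    parts: [u32; 2],
}

impl ValueRegs {
    fn one(r: u32) -> Self {
        ValueRegs { parts: [r, INVALID] }
    }
    fn two(r1: u32, r2: u32) -> Self {
        ValueRegs { parts: [r1, r2] }
    }
    fn len(self) -> usize {
        (self.parts[0] != INVALID) as usize + (self.parts[1] != INVALID) as usize
    }
    fn only_reg(self) -> Option<u32> {
        if self.len() == 1 { Some(self.parts[0]) } else { None }
    }
}

fn main() {
    let i32_val = ValueRegs::one(7); // fits in one register
    let i64_on_32bit = ValueRegs::two(0, 1); // split across two registers
    assert_eq!(i32_val.only_reg(), Some(7));
    assert_eq!(i64_on_32bit.len(), 2);
    assert!(i64_on_32bit.only_reg().is_none());
}
```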

View File

@@ -5,8 +5,12 @@ use crate::ir::StackSlot;
use crate::isa::CallConv;
use crate::machinst::*;
use crate::settings;
use regalloc::{Reg, Set, SpillSlot, Writable};
use smallvec::SmallVec;
/// A small vector of instructions (with a reasonable inline capacity); appropriate for
/// a small fixed sequence implementing one operation.
pub type SmallInstVec<I> = SmallVec<[I; 4]>;
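`SmallVec<[I; 4]>` keeps up to four elements inline before spilling to the heap, so the short one-to-four-instruction sequences these hooks return typically allocate nothing. A quick sketch, with `u32` standing in for an instruction type:

```rust
use smallvec::{smallvec, SmallVec};

type SmallInstVec<I> = SmallVec<[I; 4]>;

fn main() {
    let seq: SmallInstVec<u32> = smallvec![1, 2];
    assert!(!seq.spilled()); // still inline; no heap allocation
}
```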
/// Trait implemented by an object that tracks ABI-related state (e.g., stack
/// layout) and can generate code while emitting the *body* of a function.
@@ -14,9 +18,9 @@ pub trait ABICallee {
/// The instruction type for the ISA associated with this ABI.
type I: VCodeInst;
/// Does the ABI-body code need a temp reg? One will be provided to `init()`
/// as the `maybe_tmp` arg if so.
fn temp_needed(&self) -> bool;
/// Does the ABI-body code need a temp reg (and if so, of what type)? One
/// will be provided to `init()` as the `maybe_tmp` arg if so.
fn temp_needed(&self) -> Option<Type>;
/// Initialize. This is called after the ABICallee is constructed because it
/// may be provided with a temp vreg, which can only be allocated once the
@@ -52,7 +56,11 @@ pub trait ABICallee {
/// Generate an instruction which copies an argument to a destination
/// register.
fn gen_copy_arg_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Self::I;
fn gen_copy_arg_to_regs(
&self,
idx: usize,
into_reg: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I>;
/// Is the given argument needed in the body (as opposed to, e.g., serving
/// only as a special ABI-specific placeholder)? This controls whether
@@ -67,7 +75,11 @@ pub trait ABICallee {
fn gen_retval_area_setup(&self) -> Option<Self::I>;
/// Generate an instruction which copies a source register to a return value slot.
fn gen_copy_reg_to_retval(&self, idx: usize, from_reg: Writable<Reg>) -> Vec<Self::I>;
fn gen_copy_regs_to_retval(
&self,
idx: usize,
from_reg: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I>;
/// Generate a return instruction.
fn gen_ret(&self) -> Self::I;
@@ -99,17 +111,33 @@ pub trait ABICallee {
slot: StackSlot,
offset: u32,
ty: Type,
into_reg: Writable<Reg>,
) -> Self::I;
into_reg: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I>;
/// Store to a stackslot.
fn store_stackslot(&self, slot: StackSlot, offset: u32, ty: Type, from_reg: Reg) -> Self::I;
fn store_stackslot(
&self,
slot: StackSlot,
offset: u32,
ty: Type,
from_reg: ValueRegs<Reg>,
) -> SmallInstVec<Self::I>;
/// Load from a spillslot.
fn load_spillslot(&self, slot: SpillSlot, ty: Type, into_reg: Writable<Reg>) -> Self::I;
fn load_spillslot(
&self,
slot: SpillSlot,
ty: Type,
into_reg: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I>;
/// Store to a spillslot.
fn store_spillslot(&self, slot: SpillSlot, ty: Type, from_reg: Reg) -> Self::I;
fn store_spillslot(
&self,
slot: SpillSlot,
ty: Type,
from_reg: ValueRegs<Reg>,
) -> SmallInstVec<Self::I>;
/// Generate a stack map, given a list of spillslots and the emission state
/// at a given program point (prior to emission of the safepointing
@@ -125,13 +153,13 @@ pub trait ABICallee {
/// `store_retval`, and spillslot accesses.) `self` is mutable so that we
/// can store information in it which will be useful when creating the
/// epilogue.
fn gen_prologue(&mut self) -> Vec<Self::I>;
fn gen_prologue(&mut self) -> SmallInstVec<Self::I>;
/// Generate an epilogue, post-regalloc. Note that this must generate the
/// actual return instruction (rather than emitting this in the lowering
/// logic), because the epilogue code comes before the return and the two are
/// likely closely related.
fn gen_epilogue(&self) -> Vec<Self::I>;
fn gen_epilogue(&self) -> SmallInstVec<Self::I>;
/// Returns the full frame size for the given function, after prologue
/// emission has run. This comprises the spill slots and stack-storage slots
@@ -188,19 +216,19 @@ pub trait ABICaller {
fn num_args(&self) -> usize;
/// Emit a copy of an argument value from a source register, prior to the call.
fn emit_copy_reg_to_arg<C: LowerCtx<I = Self::I>>(
fn emit_copy_regs_to_arg<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
from_reg: Reg,
from_reg: ValueRegs<Reg>,
);
/// Emit a copy of a return value into a destination register, after the call returns.
fn emit_copy_retval_to_reg<C: LowerCtx<I = Self::I>>(
fn emit_copy_retval_to_regs<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
into_reg: Writable<Reg>,
into_reg: ValueRegs<Writable<Reg>>,
);
/// Emit code to pre-adjust the stack, prior to argument copies and call.

View File

@@ -119,6 +119,7 @@ use crate::{ir, isa};
use alloc::vec::Vec;
use log::{debug, trace};
use regalloc::{RealReg, Reg, RegClass, Set, SpillSlot, Writable};
use smallvec::{smallvec, SmallVec};
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::mem;
@@ -126,9 +127,9 @@ use std::mem;
/// A location for an argument or return value.
#[derive(Clone, Copy, Debug)]
pub enum ABIArg {
/// In a real register.
/// In a real register (or set of registers).
Reg(
RealReg,
ValueRegs<RealReg>,
ir::Type,
ir::ArgumentExtension,
ir::ArgumentPurpose,
@@ -183,6 +184,17 @@ pub enum StackAMode {
SPOffset(i64, ir::Type),
}
impl StackAMode {
/// Offset by an addend.
pub fn offset(self, addend: i64) -> Self {
match self {
StackAMode::FPOffset(off, ty) => StackAMode::FPOffset(off + addend, ty),
StackAMode::NominalSPOffset(off, ty) => StackAMode::NominalSPOffset(off + addend, ty),
StackAMode::SPOffset(off, ty) => StackAMode::SPOffset(off + addend, ty),
}
}
}
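The multi-register helpers added further down use `offset` to address consecutive register-sized parts of one value. An illustrative mirror of the pattern (offsets only; the real variants also carry an `ir::Type`):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum StackAMode {
    FPOffset(i64),
}

impl StackAMode {
    fn offset(self, addend: i64) -> Self {
        match self {
            StackAMode::FPOffset(off) => StackAMode::FPOffset(off + addend),
        }
    }
}

fn main() {
    let part0 = StackAMode::FPOffset(16); // first 4-byte part of an I64
    let part1 = part0.offset(4); // second part, one word later
    assert_eq!(part1, StackAMode::FPOffset(20));
}
```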
/// Trait implemented by machine-specific backend to provide information about
/// register assignments and to allow generating the specific instructions for
/// stack loads/saves, prologues/epilogues, etc.
@@ -270,12 +282,12 @@ pub trait ABIMachineSpec {
///
/// - The add-imm sequence must work correctly when `from_reg` and/or
/// `into_reg` are the register returned by `get_stacklimit_reg()`.
fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Self::I; 4]>;
fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Self::I>;
/// Generate a sequence that traps with a `TrapCode::StackOverflow` code if
/// the stack pointer is less than the given limit register (assuming the
/// stack grows downward).
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Self::I; 2]>;
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I>;
/// Generate an instruction to compute an address of a stack slot (FP- or
/// SP-based offset).
@@ -301,7 +313,7 @@ pub trait ABIMachineSpec {
fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Self::I;
/// Adjust the stack pointer up or down.
fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Self::I; 2]>;
fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I>;
/// Generate a meta-instruction that adjusts the nominal SP offset.
fn gen_nominal_sp_adj(amount: i32) -> Self::I;
@@ -309,13 +321,13 @@ pub trait ABIMachineSpec {
/// Generate the usual frame-setup sequence for this architecture: e.g.,
/// `push rbp / mov rbp, rsp` on x86-64, or `stp fp, lr, [sp, #-16]!` on
/// AArch64.
fn gen_prologue_frame_setup() -> SmallVec<[Self::I; 2]>;
fn gen_prologue_frame_setup() -> SmallInstVec<Self::I>;
/// Generate the usual frame-restore sequence for this architecture.
fn gen_epilogue_frame_restore() -> SmallVec<[Self::I; 2]>;
fn gen_epilogue_frame_restore() -> SmallInstVec<Self::I>;
/// Generate a probestack call.
fn gen_probestack(_frame_size: u32) -> SmallVec<[Self::I; 2]>;
fn gen_probestack(_frame_size: u32) -> SmallInstVec<Self::I>;
/// Generate a clobber-save sequence. This takes the list of *all* registers
/// written/modified by the function body. The implementation here is
@@ -483,7 +495,7 @@ pub struct ABICalleeImpl<M: ABIMachineSpec> {
/// need to be extremely careful with each instruction. The instructions are
/// manually register-allocated and carefully only use caller-saved
/// registers and keep nothing live after this sequence of instructions.
stack_limit: Option<(Reg, Vec<M::I>)>,
stack_limit: Option<(Reg, SmallInstVec<M::I>)>,
/// Are we to invoke the probestack function in the prologue? If so,
/// what is the minimum size at which we must invoke it?
probestack_min_frame: Option<u32>,
@@ -498,7 +510,7 @@ fn get_special_purpose_param_register(
) -> Option<Reg> {
let idx = f.signature.special_param_index(purpose)?;
match abi.args[idx] {
ABIArg::Reg(reg, ..) => Some(reg.to_reg()),
ABIArg::Reg(regs, ..) => Some(regs.only_reg().unwrap().to_reg()),
ABIArg::Stack(..) => None,
}
}
@@ -539,7 +551,7 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
// from the arguments.
let stack_limit =
get_special_purpose_param_register(f, &sig, ir::ArgumentPurpose::StackLimit)
.map(|reg| (reg, Vec::new()))
.map(|reg| (reg, smallvec![]))
.or_else(|| f.stack_limit.map(|gv| gen_stack_limit::<M>(f, &sig, gv)));
// Determine whether a probestack call is required for large enough
@@ -596,7 +608,12 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
/// No values can be live after the prologue, but in this case that's ok
/// because we just need to perform a stack check before progressing with
/// the rest of the function.
fn insert_stack_check(&self, stack_limit: Reg, stack_size: u32, insts: &mut Vec<M::I>) {
fn insert_stack_check(
&self,
stack_limit: Reg,
stack_size: u32,
insts: &mut SmallInstVec<M::I>,
) {
// With no explicit stack allocated we can just emit the simple check of
// the stack registers against the stack limit register, and trap if
// it's out of bounds.
@@ -649,8 +666,8 @@ fn gen_stack_limit<M: ABIMachineSpec>(
f: &ir::Function,
abi: &ABISig,
gv: ir::GlobalValue,
) -> (Reg, Vec<M::I>) {
let mut insts = Vec::new();
) -> (Reg, SmallInstVec<M::I>) {
let mut insts = smallvec![];
let reg = generate_gv::<M>(f, abi, gv, &mut insts);
return (reg, insts);
}
@@ -659,7 +676,7 @@ fn generate_gv<M: ABIMachineSpec>(
f: &ir::Function,
abi: &ABISig,
gv: ir::GlobalValue,
insts: &mut Vec<M::I>,
insts: &mut SmallInstVec<M::I>,
) -> Reg {
match f.global_values[gv] {
// Return the direct register the vmcontext is in
@@ -709,11 +726,76 @@ fn ty_from_ty_hint_or_reg_class<M: ABIMachineSpec>(r: Reg, ty: Option<Type>) ->
}
}
fn gen_move_multi<M: ABIMachineSpec>(
dst: ValueRegs<Writable<Reg>>,
src: ValueRegs<Reg>,
ty: Type,
) -> SmallInstVec<M::I> {
let mut ret = smallvec![];
let (_, tys) = M::I::rc_for_type(ty).unwrap();
for ((&dst, &src), &ty) in dst.regs().iter().zip(src.regs().iter()).zip(tys.iter()) {
ret.push(M::gen_move(dst, src, ty));
}
ret
}
fn gen_load_stack_multi<M: ABIMachineSpec>(
from: StackAMode,
dst: ValueRegs<Writable<Reg>>,
ty: Type,
) -> SmallInstVec<M::I> {
let mut ret = smallvec![];
let (_, tys) = M::I::rc_for_type(ty).unwrap();
let mut offset = 0;
// N.B.: registers are given in the `ValueRegs` in target endian order.
for (&dst, &ty) in dst.regs().iter().zip(tys.iter()) {
ret.push(M::gen_load_stack(from.offset(offset), dst, ty));
offset += ty.bytes() as i64;
}
ret
}
fn gen_store_stack_multi<M: ABIMachineSpec>(
from: StackAMode,
src: ValueRegs<Reg>,
ty: Type,
) -> SmallInstVec<M::I> {
let mut ret = smallvec![];
let (_, tys) = M::I::rc_for_type(ty).unwrap();
let mut offset = 0;
// N.B.: registers are given in the `ValueRegs` in target endian order.
for (&src, &ty) in src.regs().iter().zip(tys.iter()) {
ret.push(M::gen_store_stack(from.offset(offset), src, ty));
offset += ty.bytes() as i64;
}
ret
}
fn gen_store_base_offset_multi<M: ABIMachineSpec>(
base: Reg,
mut offset: i32,
src: ValueRegs<Reg>,
ty: Type,
) -> SmallInstVec<M::I> {
let mut ret = smallvec![];
let (_, tys) = M::I::rc_for_type(ty).unwrap();
// N.B.: registers are given in the `ValueRegs` in target endian order.
for (&src, &ty) in src.regs().iter().zip(tys.iter()) {
ret.push(M::gen_store_base_offset(base, offset, src, ty));
offset += ty.bytes() as i32;
}
ret
}
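All four helpers follow the same pattern: ask `rc_for_type` for the per-part register classes/types, then step the offset by each part's byte width. A hedged trace of `gen_store_stack_multi` for an I64 on a hypothetical 32-bit target (two I32 parts), printing the stores it would emit rather than building real instructions:

```rust
fn main() {
    // rc_for_type(I64) on such a target would report two I32-typed parts.
    let part_tys = [("I32", 4i64), ("I32", 4)];
    let srcs = ["src_part0", "src_part1"]; // the ValueRegs parts
    let mut offset = 0i64;
    for (&src, &(ty, bytes)) in srcs.iter().zip(part_tys.iter()) {
        println!("gen_store_stack(from.offset({}), {}, {})", offset, src, ty);
        offset += bytes;
    }
}
```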
impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
type I = M::I;
fn temp_needed(&self) -> bool {
self.sig.stack_ret_arg.is_some()
fn temp_needed(&self) -> Option<Type> {
if self.sig.stack_ret_arg.is_some() {
Some(M::word_type())
} else {
None
}
}
fn init(&mut self, maybe_tmp: Option<Writable<Reg>>) {
@@ -740,8 +822,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
fn liveins(&self) -> Set<RealReg> {
let mut set: Set<RealReg> = Set::empty();
for &arg in &self.sig.args {
if let ABIArg::Reg(r, ..) = arg {
set.insert(r);
if let ABIArg::Reg(regs, ..) = arg {
for &r in regs.regs() {
set.insert(r);
}
}
}
set
@@ -750,8 +834,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
fn liveouts(&self) -> Set<RealReg> {
let mut set: Set<RealReg> = Set::empty();
for &ret in &self.sig.rets {
if let ABIArg::Reg(r, ..) = ret {
set.insert(r);
if let ABIArg::Reg(regs, ..) = ret {
for &r in regs.regs() {
set.insert(r);
}
}
}
set
@@ -769,14 +855,20 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
self.stackslots.len()
}
fn gen_copy_arg_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Self::I {
fn gen_copy_arg_to_regs(
&self,
idx: usize,
into_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I> {
match &self.sig.args[idx] {
// Extension mode doesn't matter (we're copying out, not in; we
// ignore high bits by convention).
&ABIArg::Reg(r, ty, ..) => M::gen_move(into_reg, r.to_reg(), ty),
&ABIArg::Stack(off, ty, ..) => M::gen_load_stack(
&ABIArg::Reg(regs, ty, ..) => {
gen_move_multi::<M>(into_regs, regs.map(|r| r.to_reg()), ty)
}
&ABIArg::Stack(off, ty, ..) => gen_load_stack_multi::<M>(
StackAMode::FPOffset(M::fp_to_arg_offset(self.call_conv, &self.flags) + off, ty),
into_reg,
into_regs,
ty,
),
}
@@ -792,19 +884,29 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
}
}
fn gen_copy_reg_to_retval(&self, idx: usize, from_reg: Writable<Reg>) -> Vec<Self::I> {
let mut ret = Vec::new();
fn gen_copy_regs_to_retval(
&self,
idx: usize,
from_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I> {
let mut ret = smallvec![];
let word_bits = M::word_bits() as u8;
match &self.sig.rets[idx] {
&ABIArg::Reg(r, ty, ext, ..) => {
&ABIArg::Reg(regs, ty, ext, ..) => {
let from_bits = ty_bits(ty) as u8;
let dest_reg = Writable::from_reg(r.to_reg());
let dest_regs = writable_value_regs(regs.map(|r| r.to_reg()));
let ext = M::get_ext_mode(self.sig.call_conv, ext);
match (ext, from_bits) {
(ArgumentExtension::Uext, n) | (ArgumentExtension::Sext, n)
if n < word_bits =>
{
let signed = ext == ArgumentExtension::Sext;
let dest_reg = dest_regs
.only_reg()
.expect("extension only possible from one-reg value");
let from_reg = from_regs
.only_reg()
.expect("extension only possible from one-reg value");
ret.push(M::gen_extend(
dest_reg,
from_reg.to_reg(),
@@ -813,7 +915,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
/* to_bits = */ word_bits,
));
}
_ => ret.push(M::gen_move(dest_reg, from_reg.to_reg(), ty)),
_ => ret.extend(
gen_move_multi::<M>(dest_regs, non_writable_value_regs(from_regs), ty)
.into_iter(),
),
};
}
&ABIArg::Stack(off, mut ty, ext, ..) => {
@@ -829,6 +934,9 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
(ArgumentExtension::Uext, n) | (ArgumentExtension::Sext, n)
if n < word_bits =>
{
let from_reg = from_regs
.only_reg()
.expect("extension only possible from one-reg value");
assert_eq!(M::word_reg_class(), from_reg.to_reg().get_class());
let signed = ext == ArgumentExtension::Sext;
ret.push(M::gen_extend(
@@ -843,12 +951,15 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
}
_ => {}
};
ret.push(M::gen_store_base_offset(
self.ret_area_ptr.unwrap().to_reg(),
off,
from_reg.to_reg(),
ty,
));
ret.extend(
gen_store_base_offset_multi::<M>(
self.ret_area_ptr.unwrap().to_reg(),
off,
non_writable_value_regs(from_regs),
ty,
)
.into_iter(),
);
}
}
ret
@@ -856,7 +967,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
fn gen_retval_area_setup(&self) -> Option<Self::I> {
if let Some(i) = self.sig.stack_ret_arg {
let inst = self.gen_copy_arg_to_reg(i, self.ret_area_ptr.unwrap());
let insts = self.gen_copy_arg_to_regs(i, ValueRegs::one(self.ret_area_ptr.unwrap()));
let inst = insts.into_iter().next().unwrap();
trace!(
"gen_retval_area_setup: inst {:?}; ptr reg is {:?}",
inst,
@@ -891,24 +1003,30 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
slot: StackSlot,
offset: u32,
ty: Type,
into_reg: Writable<Reg>,
) -> Self::I {
into_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I> {
// Offset from beginning of stackslot area, which is at nominal SP (see
// [MemArg::NominalSPOffset] for more details on nominal SP tracking).
let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
let sp_off: i64 = stack_off + (offset as i64);
trace!("load_stackslot: slot {} -> sp_off {}", slot, sp_off);
M::gen_load_stack(StackAMode::NominalSPOffset(sp_off, ty), into_reg, ty)
gen_load_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), into_regs, ty)
}
/// Store to a stackslot.
fn store_stackslot(&self, slot: StackSlot, offset: u32, ty: Type, from_reg: Reg) -> Self::I {
fn store_stackslot(
&self,
slot: StackSlot,
offset: u32,
ty: Type,
from_regs: ValueRegs<Reg>,
) -> SmallInstVec<Self::I> {
// Offset from beginning of stackslot area, which is at nominal SP (see
// [MemArg::NominalSPOffset] for more details on nominal SP tracking).
let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
let sp_off: i64 = stack_off + (offset as i64);
trace!("store_stackslot: slot {} -> sp_off {}", slot, sp_off);
M::gen_store_stack(StackAMode::NominalSPOffset(sp_off, ty), from_reg, ty)
gen_store_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), from_regs, ty)
}
/// Produce an instruction that computes a stackslot address.
@@ -921,23 +1039,33 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
}
/// Load from a spillslot.
fn load_spillslot(&self, slot: SpillSlot, ty: Type, into_reg: Writable<Reg>) -> Self::I {
fn load_spillslot(
&self,
slot: SpillSlot,
ty: Type,
into_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<Self::I> {
// Offset from beginning of spillslot area, which is at nominal SP + stackslots_size.
let islot = slot.get() as i64;
let spill_off = islot * M::word_bytes() as i64;
let sp_off = self.stackslots_size as i64 + spill_off;
trace!("load_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
M::gen_load_stack(StackAMode::NominalSPOffset(sp_off, ty), into_reg, ty)
gen_load_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), into_regs, ty)
}
/// Store to a spillslot.
fn store_spillslot(&self, slot: SpillSlot, ty: Type, from_reg: Reg) -> Self::I {
fn store_spillslot(
&self,
slot: SpillSlot,
ty: Type,
from_regs: ValueRegs<Reg>,
) -> SmallInstVec<Self::I> {
// Offset from beginning of spillslot area, which is at nominal SP + stackslots_size.
let islot = slot.get() as i64;
let spill_off = islot * M::word_bytes() as i64;
let sp_off = self.stackslots_size as i64 + spill_off;
trace!("store_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
M::gen_store_stack(StackAMode::NominalSPOffset(sp_off, ty), from_reg, ty)
gen_store_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), from_regs, ty)
}
fn spillslots_to_stack_map(
@@ -970,8 +1098,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
StackMap::from_slice(&bits[..])
}
fn gen_prologue(&mut self) -> Vec<Self::I> {
let mut insts = vec![];
fn gen_prologue(&mut self) -> SmallInstVec<Self::I> {
let mut insts = smallvec![];
if !self.call_conv.extends_baldrdash() {
// set up frame
insts.extend(M::gen_prologue_frame_setup().into_iter());
@@ -994,7 +1122,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
// specified, otherwise always insert the stack check.
if total_stacksize > 0 || !self.is_leaf {
if let Some((reg, stack_limit_load)) = &self.stack_limit {
insts.extend_from_slice(stack_limit_load);
insts.extend(stack_limit_load.clone());
self.insert_stack_check(*reg, total_stacksize, &mut insts);
}
if let Some(min_frame) = &self.probestack_min_frame {
@@ -1037,8 +1165,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
insts
}
fn gen_epilogue(&self) -> Vec<M::I> {
let mut insts = vec![];
fn gen_epilogue(&self) -> SmallInstVec<M::I> {
let mut insts = smallvec![];
// Restore clobbered registers.
insts.extend(M::gen_clobber_restore(
@@ -1079,7 +1207,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
fn gen_spill(&self, to_slot: SpillSlot, from_reg: RealReg, ty: Option<Type>) -> Self::I {
let ty = ty_from_ty_hint_or_reg_class::<M>(from_reg.to_reg(), ty);
self.store_spillslot(to_slot, ty, from_reg.to_reg())
self.store_spillslot(to_slot, ty, ValueRegs::one(from_reg.to_reg()))
.into_iter()
.next()
.unwrap()
}
fn gen_reload(
@@ -1089,7 +1220,14 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
ty: Option<Type>,
) -> Self::I {
let ty = ty_from_ty_hint_or_reg_class::<M>(to_reg.to_reg().to_reg(), ty);
self.load_spillslot(from_slot, ty, to_reg.map(|r| r.to_reg()))
self.load_spillslot(
from_slot,
ty,
writable_value_regs(ValueRegs::one(to_reg.to_reg().to_reg())),
)
.into_iter()
.next()
.unwrap()
}
fn unwind_info_kind(&self) -> UnwindInfoKind {
@@ -1110,7 +1248,7 @@ fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Wr
let mut uses = Vec::new();
for arg in &sig.args {
match arg {
&ABIArg::Reg(reg, ..) => uses.push(reg.to_reg()),
&ABIArg::Reg(regs, ..) => uses.extend(regs.regs().iter().map(|r| r.to_reg())),
_ => {}
}
}
@@ -1119,7 +1257,9 @@ fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Wr
let mut defs = M::get_regs_clobbered_by_call(sig.call_conv);
for ret in &sig.rets {
match ret {
&ABIArg::Reg(reg, ..) => defs.push(Writable::from_reg(reg.to_reg())),
&ABIArg::Reg(regs, ..) => {
defs.extend(regs.regs().iter().map(|r| Writable::from_reg(r.to_reg())))
}
_ => {}
}
}
@@ -1238,18 +1378,19 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
adjust_stack_and_nominal_sp::<M, C>(ctx, off as i32, /* is_sub = */ false)
}
fn emit_copy_reg_to_arg<C: LowerCtx<I = Self::I>>(
fn emit_copy_regs_to_arg<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
from_reg: Reg,
from_regs: ValueRegs<Reg>,
) {
let word_rc = M::word_reg_class();
let word_bits = M::word_bits() as usize;
match &self.sig.args[idx] {
&ABIArg::Reg(reg, ty, ext, _) => {
&ABIArg::Reg(regs, ty, ext, _) => {
let ext = M::get_ext_mode(self.sig.call_conv, ext);
if ext != ir::ArgumentExtension::None && ty_bits(ty) < word_bits {
let reg = regs.only_reg().unwrap();
assert_eq!(word_rc, reg.get_class());
let signed = match ext {
ir::ArgumentExtension::Uext => false,
@@ -1258,18 +1399,27 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
};
ctx.emit(M::gen_extend(
Writable::from_reg(reg.to_reg()),
from_reg,
from_regs.only_reg().unwrap(),
signed,
ty_bits(ty) as u8,
word_bits as u8,
));
} else {
ctx.emit(M::gen_move(Writable::from_reg(reg.to_reg()), from_reg, ty));
for insn in gen_move_multi::<M>(
writable_value_regs(regs.map(|r| r.to_reg())),
from_regs,
ty,
) {
ctx.emit(insn);
}
}
}
&ABIArg::Stack(off, mut ty, ext, _) => {
let ext = M::get_ext_mode(self.sig.call_conv, ext);
if ext != ir::ArgumentExtension::None && ty_bits(ty) < word_bits {
let from_reg = from_regs
.only_reg()
.expect("only one reg for sub-word value width");
assert_eq!(word_rc, from_reg.get_class());
let signed = match ext {
ir::ArgumentExtension::Uext => false,
@@ -1289,32 +1439,37 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
// Store the extended version.
ty = M::word_type();
}
ctx.emit(M::gen_store_stack(
StackAMode::SPOffset(off, ty),
from_reg,
ty,
));
for insn in gen_store_stack_multi::<M>(StackAMode::SPOffset(off, ty), from_regs, ty)
{
ctx.emit(insn);
}
}
}
}
fn emit_copy_retval_to_reg<C: LowerCtx<I = Self::I>>(
fn emit_copy_retval_to_regs<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
into_reg: Writable<Reg>,
into_regs: ValueRegs<Writable<Reg>>,
) {
match &self.sig.rets[idx] {
// Extension mode doesn't matter because we're copying out, not in,
// and we ignore high bits in our own registers by convention.
&ABIArg::Reg(reg, ty, _, _) => ctx.emit(M::gen_move(into_reg, reg.to_reg(), ty)),
&ABIArg::Reg(regs, ty, _, _) => {
for insn in gen_move_multi::<M>(into_regs, regs.map(|r| r.to_reg()), ty) {
ctx.emit(insn);
}
}
&ABIArg::Stack(off, ty, _, _) => {
let ret_area_base = self.sig.stack_arg_space;
ctx.emit(M::gen_load_stack(
for insn in gen_load_stack_multi::<M>(
StackAMode::SPOffset(off + ret_area_base, ty),
into_reg,
into_regs,
ty,
));
) {
ctx.emit(insn);
}
}
}
}
@@ -1324,19 +1479,18 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
mem::replace(&mut self.uses, Default::default()),
mem::replace(&mut self.defs, Default::default()),
);
let word_rc = M::word_reg_class();
let word_type = M::word_type();
if let Some(i) = self.sig.stack_ret_arg {
let rd = ctx.alloc_tmp(word_rc, word_type);
let rd = ctx.alloc_tmp(word_type).only_reg().unwrap();
let ret_area_base = self.sig.stack_arg_space;
ctx.emit(M::gen_get_stack_addr(
StackAMode::SPOffset(ret_area_base, I8),
rd,
I8,
));
self.emit_copy_reg_to_arg(ctx, i, rd.to_reg());
self.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(rd.to_reg()));
}
let tmp = ctx.alloc_tmp(word_rc, word_type);
let tmp = ctx.alloc_tmp(word_type).only_reg().unwrap();
for (is_safepoint, inst) in M::gen_call(
&self.dest,
uses,

View File

@@ -1,6 +1,6 @@
//! Miscellaneous helpers for machine backends.
use super::{InsnOutput, LowerCtx, VCodeInst};
use super::{InsnOutput, LowerCtx, VCodeInst, ValueRegs};
use crate::ir::Type;
use regalloc::{Reg, Writable};
@@ -23,6 +23,6 @@ pub(crate) fn ty_has_float_or_vec_representation(ty: Type) -> bool {
pub(crate) fn get_output_reg<I: VCodeInst, C: LowerCtx<I = I>>(
ctx: &mut C,
spec: InsnOutput,
) -> Writable<Reg> {
) -> ValueRegs<Writable<Reg>> {
ctx.get_output(spec.insn, spec.output)
}

View File

@@ -5,29 +5,27 @@
// TODO: separate the IR-query core of `LowerCtx` from the lowering logic built
// on top of it, e.g. the side-effect/coloring analysis and the scan support.
use crate::data_value::DataValue;
use crate::entity::SecondaryMap;
use crate::fx::{FxHashMap, FxHashSet};
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::instructions::BranchInfo;
use crate::ir::types::I64;
use crate::ir::{
ArgumentPurpose, Block, Constant, ConstantData, ExternalName, Function, GlobalValueData, Inst,
InstructionData, MemFlags, Opcode, Signature, SourceLoc, Type, Value, ValueDef,
};
use crate::machinst::{
ABICallee, BlockIndex, BlockLoweringOrder, LoweredBlock, MachLabel, VCode, VCodeBuilder,
VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst,
writable_value_regs, ABICallee, BlockIndex, BlockLoweringOrder, LoweredBlock, MachLabel, VCode,
VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs,
};
use crate::CodegenResult;
use regalloc::{Reg, RegClass, StackmapRequestInfo, VirtualReg, Writable};
use crate::data_value::DataValue;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::TryInto;
use log::debug;
use regalloc::{Reg, StackmapRequestInfo, Writable};
use smallvec::SmallVec;
use std::fmt::Debug;
/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
@@ -71,7 +69,7 @@ pub trait LowerCtx {
/// instruction should lower into a sequence that fills this register. (Why
/// not allow the backend to specify its own result register for the return?
/// Because there may be multiple return points.)
fn retval(&self, idx: usize) -> Writable<Reg>;
fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>>;
/// Returns the vreg containing the VmContext parameter, if there's one.
fn get_vm_context(&self) -> Option<Reg>;
@@ -118,7 +116,7 @@ pub trait LowerCtx {
///
/// The instruction input may be available in either of these forms. It may
/// be available in neither form, if the conditions are not met; if so, use
/// `put_input_in_reg()` instead to get it in a register.
/// `put_input_in_regs()` instead to get it in a register.
///
/// If the backend merges the effect of a side-effecting instruction, it
/// must call `sink_inst()`. When this is called, it indicates that the
@@ -126,29 +124,29 @@ pub trait LowerCtx {
/// instruction's result(s) must have *no* uses remaining, because it will
/// not be codegen'd (it has been integrated into the current instruction).
fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput;
/// Put the `idx`th input into a register and return the assigned register.
fn put_input_in_reg(&mut self, ir_inst: Inst, idx: usize) -> Reg;
/// Get the `idx`th output register of the given IR instruction. When
/// Put the `idx`th input into register(s) and return the assigned register(s).
fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg>;
/// Get the `idx`th output register(s) of the given IR instruction. When
/// `backend.lower_inst_to_regs(ctx, inst)` is called, it is expected that
/// the backend will write results to these output register(s). These
/// registers will always be "fresh"; they are guaranteed not to overlap with
/// any of the inputs, and can be freely used as scratch registers within
/// the lowered instruction sequence, as long as their final values are the
/// results of the computation.
fn get_output(&self, ir_inst: Inst, idx: usize) -> Writable<Reg>;
fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>>;
// Codegen primitives: allocate temps, emit instructions, set result registers,
// ask for an input to be gen'd into a register.
/// Get a new temp.
fn alloc_tmp(&mut self, rc: RegClass, ty: Type) -> Writable<Reg>;
fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>>;
/// Emit a machine instruction.
fn emit(&mut self, mach_inst: Self::I);
/// Emit a machine instruction that is a safepoint.
fn emit_safepoint(&mut self, mach_inst: Self::I);
/// Indicate that the side-effect of an instruction has been sunk to the
/// current scan location. This should only be done when the instruction's
/// original results are not used (i.e., `put_input_in_reg` is not invoked
/// original results are not used (i.e., `put_input_in_regs` is not invoked
/// for the input produced by the sunk instruction), otherwise the
/// side-effect will occur twice.
fn sink_inst(&mut self, ir_inst: Inst);
@@ -234,10 +232,10 @@ pub struct Lower<'func, I: VCodeInst> {
vcode: VCodeBuilder<I>,
/// Mapping from `Value` (SSA value in IR) to virtual register.
value_regs: SecondaryMap<Value, Reg>,
value_regs: SecondaryMap<Value, ValueRegs<Reg>>,
/// Return-value vregs.
retval_regs: Vec<Reg>,
retval_regs: Vec<ValueRegs<Reg>>,
/// Instruction colors at block exits. From this map, we can recover all
/// instruction colors by scanning backward from the block end and
@@ -306,20 +304,30 @@ pub enum RelocDistance {
Far,
}
fn alloc_vreg(
value_regs: &mut SecondaryMap<Value, Reg>,
regclass: RegClass,
value: Value,
fn alloc_vregs<I: VCodeInst>(
ty: Type,
next_vreg: &mut u32,
) -> VirtualReg {
if value_regs[value].is_invalid() {
// default value in map.
let v = *next_vreg;
*next_vreg += 1;
value_regs[value] = Reg::new_virtual(regclass, v);
debug!("value {} gets vreg {:?}", value, v);
vcode: &mut VCodeBuilder<I>,
) -> CodegenResult<ValueRegs<Reg>> {
let v = *next_vreg;
let (regclasses, tys) = I::rc_for_type(ty)?;
*next_vreg += regclasses.len() as u32;
let regs = match regclasses {
&[rc0] => ValueRegs::one(Reg::new_virtual(rc0, v)),
&[rc0, rc1] => ValueRegs::two(Reg::new_virtual(rc0, v), Reg::new_virtual(rc1, v + 1)),
#[cfg(feature = "arm32")]
&[rc0, rc1, rc2, rc3] => ValueRegs::four(
Reg::new_virtual(rc0, v),
Reg::new_virtual(rc1, v + 1),
Reg::new_virtual(rc2, v + 2),
Reg::new_virtual(rc3, v + 3),
),
_ => panic!("Value must reside in 1, 2 or 4 registers"),
};
for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
vcode.set_vreg_type(reg.to_virtual_reg(), reg_ty);
}
value_regs[value].as_virtual_reg().unwrap()
Ok(regs)
}
enum GenerateReturn {
@@ -340,26 +348,29 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
let mut next_vreg: u32 = 0;
let mut value_regs = SecondaryMap::with_default(Reg::invalid());
let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
// Assign a vreg to each block param and each inst result.
for bb in f.layout.blocks() {
for &param in f.dfg.block_params(bb) {
let ty = f.dfg.value_type(param);
let vreg = alloc_vreg(&mut value_regs, I::rc_for_type(ty)?, param, &mut next_vreg);
vcode.set_vreg_type(vreg, ty);
debug!("bb {} param {}: vreg {:?}", bb, param, vreg);
if value_regs[param].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[param] = regs;
debug!("bb {} param {}: regs {:?}", bb, param, regs);
}
}
for inst in f.layout.block_insts(bb) {
for &result in f.dfg.inst_results(inst) {
let ty = f.dfg.value_type(result);
let vreg =
alloc_vreg(&mut value_regs, I::rc_for_type(ty)?, result, &mut next_vreg);
vcode.set_vreg_type(vreg, ty);
debug!(
"bb {} inst {} ({:?}): result vreg {:?}",
bb, inst, f.dfg[inst], vreg
);
if value_regs[result].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[result] = regs;
debug!(
"bb {} inst {} ({:?}): result regs {:?}",
bb, inst, f.dfg[inst], regs,
);
}
}
}
}
@@ -370,18 +381,15 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
.map(|vm_context_index| {
let entry_block = f.layout.entry_block().unwrap();
let param = f.dfg.block_params(entry_block)[vm_context_index];
value_regs[param]
value_regs[param].only_reg().unwrap()
});
// Assign a vreg to each return value.
// Assign vreg(s) to each return value.
let mut retval_regs = vec![];
for ret in &f.signature.returns {
let v = next_vreg;
next_vreg += 1;
let regclass = I::rc_for_type(ret.value_type)?;
let vreg = Reg::new_virtual(regclass, v);
retval_regs.push(vreg);
vcode.set_vreg_type(vreg.as_virtual_reg().unwrap(), ret.value_type);
let regs = alloc_vregs(ret.value_type, &mut next_vreg, &mut vcode)?;
retval_regs.push(regs);
debug!("retval gets regs {:?}", regs);
}
// Compute instruction colors, find constant instructions, and find instructions with
@@ -453,9 +461,10 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
if !self.vcode.abi().arg_is_needed_in_body(i) {
continue;
}
let reg = Writable::from_reg(self.value_regs[*param]);
let insn = self.vcode.abi().gen_copy_arg_to_reg(i, reg);
self.emit(insn);
let regs = writable_value_regs(self.value_regs[*param]);
for insn in self.vcode.abi().gen_copy_arg_to_regs(i, regs).into_iter() {
self.emit(insn);
}
}
if let Some(insn) = self.vcode.abi().gen_retval_area_setup() {
self.emit(insn);
@@ -465,10 +474,14 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
fn gen_retval_setup(&mut self, gen_ret_inst: GenerateReturn) {
let retval_regs = self.retval_regs.clone();
for (i, reg) in retval_regs.into_iter().enumerate() {
let reg = Writable::from_reg(reg);
let insns = self.vcode.abi().gen_copy_reg_to_retval(i, reg);
for insn in insns {
for (i, regs) in retval_regs.into_iter().enumerate() {
let regs = writable_value_regs(regs);
for insn in self
.vcode
.abi()
.gen_copy_regs_to_retval(i, regs)
.into_iter()
{
self.emit(insn);
}
}
@@ -499,8 +512,8 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
//
// * one for dsts whose sources are non-constants.
let mut const_bundles = SmallVec::<[(Type, Writable<Reg>, u64); 16]>::new();
let mut var_bundles = SmallVec::<[(Type, Writable<Reg>, Reg); 16]>::new();
let mut const_bundles: SmallVec<[_; 16]> = SmallVec::new();
let mut var_bundles: SmallVec<[_; 16]> = SmallVec::new();
let mut i = 0;
for (dst_val, src_val) in self
@@ -514,7 +527,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
let ty = self.f.dfg.value_type(src_val);
debug_assert!(ty == self.f.dfg.value_type(*dst_val));
let dst_reg = self.value_regs[*dst_val];
let dst_regs = self.value_regs[*dst_val];
let input = self.get_value_as_source_or_const(src_val);
debug!("jump arg {} is {}", i, src_val);
@@ -522,15 +535,15 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
if let Some(c) = input.constant {
debug!(" -> constant {}", c);
const_bundles.push((ty, Writable::from_reg(dst_reg), c));
const_bundles.push((ty, writable_value_regs(dst_regs), c));
} else {
let src_reg = self.put_value_in_reg(src_val);
debug!(" -> reg {:?}", src_reg);
let src_regs = self.put_value_in_regs(src_val);
debug!(" -> reg {:?}", src_regs);
// Skip self-assignments. Not only are they pointless, they falsely trigger the
// overlap-check below and hence can cause a lot of unnecessary copying through
// temporaries.
if dst_reg != src_reg {
var_bundles.push((ty, Writable::from_reg(dst_reg), src_reg));
if dst_regs != src_regs {
var_bundles.push((ty, writable_value_regs(dst_regs), src_regs));
}
}
}
@@ -541,41 +554,69 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
// for cases of up to circa 16 args. Currently not possible because regalloc.rs
// does not export it.
let mut src_reg_set = FxHashSet::<Reg>::default();
for (_, _, src_reg) in &var_bundles {
src_reg_set.insert(*src_reg);
for (_, _, src_regs) in &var_bundles {
for &reg in src_regs.regs() {
src_reg_set.insert(reg);
}
}
let mut overlaps = false;
for (_, dst_reg, _) in &var_bundles {
if src_reg_set.contains(&dst_reg.to_reg()) {
overlaps = true;
break;
'outer: for (_, dst_regs, _) in &var_bundles {
for &reg in dst_regs.regs() {
if src_reg_set.contains(&reg.to_reg()) {
overlaps = true;
break 'outer;
}
}
}
// If, as is mostly the case, the source and destination register sets are non
// overlapping, then we can copy directly, so as to save the register allocator work.
if !overlaps {
for (ty, dst_reg, src_reg) in &var_bundles {
self.emit(I::gen_move(*dst_reg, *src_reg, *ty));
for (ty, dst_regs, src_regs) in &var_bundles {
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((dst, src), reg_ty) in dst_regs
.regs()
.iter()
.zip(src_regs.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*dst, *src, *reg_ty));
}
}
} else {
// There's some overlap, so play safe and copy via temps.
let mut tmp_regs = SmallVec::<[Writable<Reg>; 16]>::new();
let mut tmp_regs = SmallVec::<[ValueRegs<Writable<Reg>>; 16]>::new();
for (ty, _, _) in &var_bundles {
tmp_regs.push(self.alloc_tmp(I::rc_for_type(*ty)?, *ty));
tmp_regs.push(self.alloc_tmp(*ty));
}
for ((ty, _, src_reg), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
self.emit(I::gen_move(*tmp_reg, *src_reg, *ty));
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((tmp, src), reg_ty) in tmp_reg
.regs()
.iter()
.zip(src_reg.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*tmp, *src, *reg_ty));
}
}
for ((ty, dst_reg, _), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
self.emit(I::gen_move(*dst_reg, (*tmp_reg).to_reg(), *ty));
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((dst, tmp), reg_ty) in dst_reg
.regs()
.iter()
.zip(tmp_reg.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*dst, tmp.to_reg(), *reg_ty));
}
}
}
// Now, finally, deal with the moves whose sources are constants.
for (ty, dst_reg, const_u64) in &const_bundles {
for inst in I::gen_constant(*dst_reg, *const_u64, *ty, |reg_class, ty| {
self.alloc_tmp(reg_class, ty)
for (ty, dst_reg, const_val) in &const_bundles {
for inst in I::gen_constant(*dst_reg, *const_val as u128, *ty, |ty| {
self.alloc_tmp(ty).only_reg().unwrap()
})
.into_iter()
{
@@ -766,8 +807,8 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
debug!("about to lower function: {:?}", self.f);
// Initialize the ABI object, giving it a temp if requested.
let maybe_tmp = if self.vcode.abi().temp_needed() {
Some(self.alloc_tmp(RegClass::I64, I64))
let maybe_tmp = if let Some(temp_ty) = self.vcode.abi().temp_needed() {
Some(self.alloc_tmp(temp_ty).only_reg().unwrap())
} else {
None
};
@@ -848,11 +889,11 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
Ok((vcode, stack_map_info))
}
fn put_value_in_reg(&mut self, val: Value) -> Reg {
debug!("put_value_in_reg: val {}", val,);
let mut reg = self.value_regs[val];
debug!(" -> reg {:?}", reg);
assert!(reg.is_valid());
fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
debug!("put_value_in_reg: val {}", val);
let mut regs = self.value_regs[val];
debug!(" -> regs {:?}", regs);
assert!(regs.is_valid());
self.value_lowered_uses[val] += 1;
@@ -864,12 +905,12 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
if let ValueDef::Result(i, 0) = self.f.dfg.value_def(val) {
if self.f.dfg[i].opcode() == Opcode::GetPinnedReg {
if let Some(pr) = self.pinned_reg {
reg = pr;
regs = ValueRegs::one(pr);
}
}
}
reg
regs
}
/// Get the actual inputs for a value. This is the implementation for
@@ -944,8 +985,8 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
self.vcode.abi()
}
fn retval(&self, idx: usize) -> Writable<Reg> {
Writable::from_reg(self.retval_regs[idx])
fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>> {
writable_value_regs(self.retval_regs[idx])
}
fn get_vm_context(&self) -> Option<Reg> {
@@ -1050,23 +1091,19 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
self.get_value_as_source_or_const(val)
}
fn put_input_in_reg(&mut self, ir_inst: Inst, idx: usize) -> Reg {
fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
let val = self.f.dfg.inst_args(ir_inst)[idx];
let val = self.f.dfg.resolve_aliases(val);
self.put_value_in_reg(val)
self.put_value_in_regs(val)
}
fn get_output(&self, ir_inst: Inst, idx: usize) -> Writable<Reg> {
fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>> {
let val = self.f.dfg.inst_results(ir_inst)[idx];
Writable::from_reg(self.value_regs[val])
writable_value_regs(self.value_regs[val])
}
fn alloc_tmp(&mut self, rc: RegClass, ty: Type) -> Writable<Reg> {
let v = self.next_vreg;
self.next_vreg += 1;
let vreg = Reg::new_virtual(rc, v);
self.vcode.set_vreg_type(vreg.as_virtual_reg().unwrap(), ty);
Writable::from_reg(vreg)
fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
writable_value_regs(alloc_vregs(ty, &mut self.next_vreg, &mut self.vcode).unwrap())
}
fn emit(&mut self, mach_inst: I) {
@@ -1131,8 +1168,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
if reg.is_virtual() {
reg
} else {
let rc = reg.get_class();
let new_reg = self.alloc_tmp(rc, ty);
let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
self.emit(I::gen_move(new_reg, reg, ty));
new_reg.to_reg()
}

View File

@@ -135,6 +135,8 @@ pub mod helpers;
pub use helpers::*;
pub mod inst_common;
pub use inst_common::*;
pub mod valueregs;
pub use valueregs::*;
/// A machine instruction.
pub trait MachInst: Clone + Debug {
@@ -165,9 +167,9 @@ pub trait MachInst: Clone + Debug {
fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Self;
/// Generate a constant into a reg.
fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
to_reg: Writable<Reg>,
value: u64,
fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
to_regs: ValueRegs<Writable<Reg>>,
value: u128,
ty: Type,
alloc_tmp: F,
) -> SmallVec<[Self; 4]>;
@@ -180,9 +182,19 @@ pub trait MachInst: Clone + Debug {
/// (e.g., add directly from or directly to memory), like x86.
fn maybe_direct_reload(&self, reg: VirtualReg, slot: SpillSlot) -> Option<Self>;
/// Determine a register class to store the given Cranelift type.
/// May return an error if the type isn't supported by this backend.
fn rc_for_type(ty: Type) -> CodegenResult<RegClass>;
/// Determine register class(es) to store the given Cranelift type, and the
/// Cranelift type actually stored in the underlying register(s). May return
/// an error if the type isn't supported by this backend.
///
/// If the type requires multiple registers, then the list of registers is
/// returned in little-endian order.
///
/// Note that the type actually stored in the register(s) may differ in the
/// case that a value is split across registers: for example, on a 32-bit
/// target, an I64 may be stored in two registers, each of which holds an
/// I32. The actually-stored types are used only to inform the backend when
/// generating spills and reloads for individual registers.
fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])>;
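As a hedged illustration of this contract, a toy 32-bit backend might answer as follows (illustrative enum names, not regalloc's actual types):

```rust
#[derive(Clone, Copy, Debug)]
enum RegClass32 { I32, F64 }

#[derive(Clone, Copy, Debug, PartialEq)]
enum Ty { I32, I64, F64 }

fn rc_for_type(ty: Ty) -> Result<(&'static [RegClass32], &'static [Ty]), ()> {
    match ty {
        Ty::I32 => Ok((&[RegClass32::I32], &[Ty::I32])),
        // An I64 splits into two I32-class registers; each part is
        // spilled/reloaded as an I32, as the doc comment above describes.
        Ty::I64 => Ok((&[RegClass32::I32, RegClass32::I32], &[Ty::I32, Ty::I32])),
        Ty::F64 => Ok((&[RegClass32::F64], &[Ty::F64])),
    }
}

fn main() {
    let (classes, tys) = rc_for_type(Ty::I64).unwrap();
    assert_eq!(classes.len(), 2);
    assert_eq!(tys, &[Ty::I32, Ty::I32][..]);
}
```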
/// Generate a jump to another target. Used during lowering of
/// control flow.

View File

@@ -0,0 +1,185 @@
//! Data structure for tracking the (possibly multiple) registers that hold one
//! SSA `Value`.
use regalloc::{RealReg, Reg, VirtualReg, Writable};
use std::fmt::Debug;
#[cfg(feature = "arm32")]
const VALUE_REGS_PARTS: usize = 4;
#[cfg(not(feature = "arm32"))]
const VALUE_REGS_PARTS: usize = 2;
/// Location at which a `Value` is stored in register(s): the value is located
/// in one or more registers, depending on its width. A value may be stored in
/// more than one register if no single machine register is wide enough to
/// hold it: for example, on a 32-bit architecture, we may store `I64` values
/// in two registers, and `I128` values in four.
///
/// By convention, the register parts are kept in machine-endian order here.
///
/// N.B.: we cap the capacity of this at four (when any 32-bit target is
/// enabled) or two (otherwise), and we use special in-band sentinel `Reg`
/// values (`Reg::invalid()`) to avoid the need to carry a separate length. This
/// allows the struct to be `Copy` (no heap or drop overhead) and be only 16 or
/// 8 bytes, which is important for compiler performance.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ValueRegs<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> {
parts: [R; VALUE_REGS_PARTS],
}
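The size figures above hold assuming regalloc's `Reg` is a 4-byte value (which the stated 8/16-byte totals imply): two in-band parts make the struct 8 bytes, four make it 16. A sanity check with a `u32` stand-in:

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
struct ToyValueRegs {
    parts: [u32; 2], // u32 stands in for a 4-byte Reg
}

fn main() {
    assert_eq!(std::mem::size_of::<ToyValueRegs>(), 8);
    // With the arm32 feature the array has four parts: 16 bytes.
    assert_eq!(std::mem::size_of::<[u32; 4]>(), 16);
}
```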
/// A type with an "invalid" sentinel value.
pub trait InvalidSentinel: Copy + Eq {
/// The invalid sentinel value.
fn invalid_sentinel() -> Self;
/// Is this the invalid sentinel?
fn is_invalid_sentinel(self) -> bool {
self == Self::invalid_sentinel()
}
}
impl InvalidSentinel for Reg {
fn invalid_sentinel() -> Self {
Reg::invalid()
}
}
impl InvalidSentinel for VirtualReg {
fn invalid_sentinel() -> Self {
VirtualReg::invalid()
}
}
impl InvalidSentinel for RealReg {
fn invalid_sentinel() -> Self {
RealReg::invalid()
}
}
impl InvalidSentinel for Writable<Reg> {
fn invalid_sentinel() -> Self {
Writable::from_reg(Reg::invalid_sentinel())
}
}
impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
/// Create an invalid Value-in-Reg.
pub fn invalid() -> Self {
ValueRegs {
parts: [R::invalid_sentinel(); VALUE_REGS_PARTS],
}
}
/// Is this Value-to-Reg mapping valid?
pub fn is_valid(self) -> bool {
!self.parts[0].is_invalid_sentinel()
}
/// Is this Value-to-Reg mapping invalid?
pub fn is_invalid(self) -> bool {
self.parts[0].is_invalid_sentinel()
}
/// Return the single register used for this value, if any.
pub fn only_reg(self) -> Option<R> {
if self.len() == 1 {
Some(self.parts[0])
} else {
None
}
}
/// Return a slice of the registers storing this value.
pub fn regs(&self) -> &[R] {
&self.parts[0..self.len()]
}
}
#[cfg(feature = "arm32")]
impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
/// Create a Value-in-R location for a value stored in one register.
pub fn one(reg: R) -> Self {
ValueRegs {
parts: [
reg,
R::invalid_sentinel(),
R::invalid_sentinel(),
R::invalid_sentinel(),
],
}
}
/// Create a Value-in-R location for a value stored in two registers.
pub fn two(r1: R, r2: R) -> Self {
ValueRegs {
parts: [r1, r2, R::invalid_sentinel(), R::invalid_sentinel()],
}
}
/// Create a Value-in-R location for a value stored in four registers.
pub fn four(r1: R, r2: R, r3: R, r4: R) -> Self {
ValueRegs {
parts: [r1, r2, r3, r4],
}
}
/// Return the number of registers used.
pub fn len(self) -> usize {
// If rustc/LLVM is smart enough, this might even be vectorized...
(self.parts[0] != R::invalid_sentinel()) as usize
+ (self.parts[1] != R::invalid_sentinel()) as usize
+ (self.parts[2] != R::invalid_sentinel()) as usize
+ (self.parts[3] != R::invalid_sentinel()) as usize
}
/// Map individual registers via a map function.
pub fn map<NewR, F>(self, f: F) -> ValueRegs<NewR>
where
NewR: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel,
F: Fn(R) -> NewR,
{
ValueRegs {
parts: [
f(self.parts[0]),
f(self.parts[1]),
f(self.parts[2]),
f(self.parts[3]),
],
}
}
}
#[cfg(not(feature = "arm32"))]
impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
/// Create a Value-in-R location for a value stored in one register.
pub fn one(reg: R) -> Self {
ValueRegs {
parts: [reg, R::invalid_sentinel()],
}
}
/// Create a Value-in-R location for a value stored in two registers.
pub fn two(r1: R, r2: R) -> Self {
ValueRegs { parts: [r1, r2] }
}
/// Return the number of registers used.
pub fn len(self) -> usize {
// If rustc/LLVM is smart enough, this might even be vectorized...
(self.parts[0] != R::invalid_sentinel()) as usize
+ (self.parts[1] != R::invalid_sentinel()) as usize
}
/// Map individual registers via a map function.
pub fn map<NewR, F>(self, f: F) -> ValueRegs<NewR>
where
NewR: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel,
F: Fn(R) -> NewR,
{
ValueRegs {
parts: [f(self.parts[0]), f(self.parts[1])],
}
}
}
/// Create a writable ValueRegs.
pub(crate) fn writable_value_regs(regs: ValueRegs<Reg>) -> ValueRegs<Writable<Reg>> {
regs.map(|r| Writable::from_reg(r))
}
/// Strip a writable ValueRegs down to a readonly ValueRegs.
pub(crate) fn non_writable_value_regs(regs: ValueRegs<Writable<Reg>>) -> ValueRegs<Reg> {
regs.map(|r| r.to_reg())
}
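Finally, a hedged sketch of the kind of lowering this framework unlocks: a 64-bit add on a 32-bit target, operating on the two halves a `ValueRegs` would hand the backend. This is a plain-Rust simulation of the add/add-with-carry pair a backend would emit, not Cranelift code, and it assumes the least-significant part comes first, per the little-endian ordering described for `rc_for_type` above:

```rust
// a and b are [low, high] 32-bit parts of 64-bit values, mirroring a
// two-register ValueRegs with the least-significant part first.
fn add64_via_parts(a: [u32; 2], b: [u32; 2]) -> [u32; 2] {
    let (lo, carry) = a[0].overflowing_add(b[0]); // e.g. ARM32 ADDS
    let hi = a[1].wrapping_add(b[1]).wrapping_add(carry as u32); // e.g. ADC
    [lo, hi]
}

fn main() {
    let a: u64 = 0xffff_ffff;
    let b: u64 = 1;
    let parts = add64_via_parts(
        [a as u32, (a >> 32) as u32],
        [b as u32, (b >> 32) as u32],
    );
    let sum = (parts[0] as u64) | ((parts[1] as u64) << 32);
    assert_eq!(sum, a.wrapping_add(b)); // carry propagated into the high part
}
```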