Address review comments: more doc comments and some minor refactorings.

Chris Fallin
2021-08-30 17:15:37 -07:00
parent e10bffbca8
commit 6d313f2b56
11 changed files with 256 additions and 153 deletions

View File

@@ -38,23 +38,24 @@ pub struct CFGInfo {
impl CFGInfo {
pub fn new<F: Function>(f: &F) -> Result<CFGInfo, RegAllocError> {
let postorder =
postorder::calculate(f.blocks(), f.entry_block(), |block| f.block_succs(block));
let postorder = postorder::calculate(f.num_blocks(), f.entry_block(), |block| {
f.block_succs(block)
});
let domtree = domtree::calculate(
f.blocks(),
f.num_blocks(),
|block| f.block_preds(block),
&postorder[..],
f.entry_block(),
);
let mut insn_block = vec![Block::invalid(); f.insts()];
let mut insn_block = vec![Block::invalid(); f.num_insts()];
let mut vreg_def_inst = vec![Inst::invalid(); f.num_vregs()];
let mut vreg_def_blockparam = vec![(Block::invalid(), 0); f.num_vregs()];
let mut block_entry = vec![ProgPoint::before(Inst::invalid()); f.blocks()];
let mut block_exit = vec![ProgPoint::before(Inst::invalid()); f.blocks()];
let mut backedge_in = vec![0; f.blocks()];
let mut backedge_out = vec![0; f.blocks()];
let mut block_entry = vec![ProgPoint::before(Inst::invalid()); f.num_blocks()];
let mut block_exit = vec![ProgPoint::before(Inst::invalid()); f.num_blocks()];
let mut backedge_in = vec![0; f.num_blocks()];
let mut backedge_out = vec![0; f.num_blocks()];
for block in 0..f.blocks() {
for block in 0..f.num_blocks() {
let block = Block::new(block);
for (i, param) in f.block_params(block).iter().enumerate() {
vreg_def_blockparam[param.vreg()] = (block, i as u32);
@@ -116,7 +117,7 @@ impl CFGInfo {
let mut approx_loop_depth = vec![];
let mut backedge_stack: SmallVec<[usize; 4]> = smallvec![];
let mut cur_depth = 0;
for block in 0..f.blocks() {
for block in 0..f.num_blocks() {
if backedge_in[block] > 0 {
cur_depth += 1;
backedge_stack.push(backedge_in[block]);

View File

@@ -389,17 +389,6 @@ impl CheckerState {
self.allocations.remove(&Allocation::reg(*clobber));
}
}
&CheckerInst::BlockParams {
ref vregs,
ref allocs,
..
} => {
for (vreg, alloc) in vregs.iter().zip(allocs.iter()) {
let reftyped = checker.reftyped_vregs.contains(vreg);
self.allocations
.insert(*alloc, CheckerValue::Reg(*vreg, reftyped));
}
}
&CheckerInst::DefAlloc { alloc, vreg } => {
let reftyped = checker.reftyped_vregs.contains(&vreg);
self.allocations
@@ -478,14 +467,6 @@ pub(crate) enum CheckerInst {
clobbers: Vec<PReg>,
},
/// The top of a block with blockparams. We define the given vregs
/// into the given allocations.
BlockParams {
block: Block,
vregs: Vec<VReg>,
allocs: Vec<Allocation>,
},
/// Define an allocation's contents. Like BlockParams but for one
/// allocation. Used sometimes when moves are elided but ownership
/// of a value is logically transferred to a new vreg.
@@ -514,7 +495,7 @@ impl<'a, F: Function> Checker<'a, F> {
let mut bb_insts = HashMap::new();
let mut reftyped_vregs = HashSet::new();
for block in 0..f.blocks() {
for block in 0..f.num_blocks() {
let block = Block::new(block);
bb_in.insert(block, Default::default());
bb_insts.insert(block, vec![]);
@@ -548,7 +529,7 @@ impl<'a, F: Function> Checker<'a, F> {
// For each original instruction, create an `Op`.
let mut last_inst = None;
let mut insert_idx = 0;
for block in 0..self.f.blocks() {
for block in 0..self.f.num_blocks() {
let block = Block::new(block);
for inst in self.f.block_insns(block).iter() {
assert!(last_inst.is_none() || inst > last_inst.unwrap());
@@ -617,17 +598,6 @@ impl<'a, F: Function> Checker<'a, F> {
.unwrap()
.push(CheckerInst::DefAlloc { alloc, vreg });
}
&Edit::BlockParams {
ref vregs,
ref allocs,
} => {
let inst = CheckerInst::BlockParams {
block,
vregs: vregs.clone(),
allocs: allocs.clone(),
};
self.bb_insts.get_mut(&block).unwrap().push(inst);
}
}
}
}
@@ -636,7 +606,7 @@ impl<'a, F: Function> Checker<'a, F> {
fn analyze(&mut self) {
let mut queue = VecDeque::new();
let mut queue_set = HashSet::new();
for block in 0..self.f.blocks() {
for block in 0..self.f.num_blocks() {
let block = Block::new(block);
queue.push_back(block);
queue_set.insert(block);
@@ -718,7 +688,7 @@ impl<'a, F: Function> Checker<'a, F> {
for vreg in self.f.reftype_vregs() {
log::trace!(" REF: {}", vreg);
}
for bb in 0..self.f.blocks() {
for bb in 0..self.f.num_blocks() {
let bb = Block::new(bb);
log::trace!("block{}:", bb.index());
let insts = self.bb_insts.get(&bb).unwrap();
@@ -743,17 +713,6 @@ impl<'a, F: Function> Checker<'a, F> {
&CheckerInst::Move { from, into } => {
log::trace!(" {} -> {}", from, into);
}
&CheckerInst::BlockParams {
ref vregs,
ref allocs,
..
} => {
let mut args = vec![];
for (vreg, alloc) in vregs.iter().zip(allocs.iter()) {
args.push(format!("{}:{}", vreg, alloc));
}
log::trace!(" blockparams: {}", args.join(", "));
}
&CheckerInst::DefAlloc { alloc, vreg } => {
log::trace!(" defalloc: {}:{}", vreg, alloc);
}

View File

@@ -75,11 +75,11 @@ pub struct Func {
}
impl Function for Func {
fn insts(&self) -> usize {
fn num_insts(&self) -> usize {
self.insts.len()
}
fn blocks(&self) -> usize {
fn num_blocks(&self) -> usize {
self.blocks.len()
}

View File

@@ -61,7 +61,7 @@ impl<'a, F: Function> Env<'a, F> {
pub fn dump_results(&self) {
log::info!("=== REGALLOC RESULTS ===");
for block in 0..self.func.blocks() {
for block in 0..self.func.num_blocks() {
let block = Block::new(block);
log::info!(
"block{}: [succs {:?} preds {:?}]",

View File

@@ -82,7 +82,7 @@ impl<'a, F: Function> Env<'a, F> {
self.vregs[v.vreg()].is_pinned = true;
}
// Create allocations too.
for inst in 0..self.func.insts() {
for inst in 0..self.func.num_insts() {
let start = self.allocs.len() as u32;
self.inst_alloc_offsets.push(start);
for _ in 0..self.func.inst_operands(Inst::new(inst)).len() {
@@ -247,7 +247,7 @@ impl<'a, F: Function> Env<'a, F> {
pub fn compute_liveness(&mut self) -> Result<(), RegAllocError> {
// Create initial LiveIn and LiveOut bitsets.
for _ in 0..self.func.blocks() {
for _ in 0..self.func.num_blocks() {
self.liveins.push(BitVec::new());
self.liveouts.push(BitVec::new());
}
@@ -347,7 +347,7 @@ impl<'a, F: Function> Env<'a, F> {
let mut vreg_ranges: Vec<LiveRangeIndex> =
vec![LiveRangeIndex::invalid(); self.func.num_vregs()];
for i in (0..self.func.blocks()).rev() {
for i in (0..self.func.num_blocks()).rev() {
let block = Block::new(i);
self.stats.livein_blocks += 1;

View File

@@ -303,7 +303,7 @@ impl<'a, F: Function> Env<'a, F> {
self.bundles[bundle.index()].spillset = ssidx;
}
for inst in 0..self.func.insts() {
for inst in 0..self.func.num_insts() {
let inst = Inst::new(inst);
// Attempt to merge Reuse-constraint operand outputs with the

View File

@@ -44,14 +44,14 @@ impl<'a, F: Function> Env<'a, F> {
cfginfo: CFGInfo,
annotations_enabled: bool,
) -> Self {
let n = func.insts();
let n = func.num_insts();
Self {
func,
env,
cfginfo,
liveins: Vec::with_capacity(func.blocks()),
liveouts: Vec::with_capacity(func.blocks()),
liveins: Vec::with_capacity(func.num_blocks()),
liveouts: Vec::with_capacity(func.num_blocks()),
blockparam_outs: vec![],
blockparam_ins: vec![],
blockparam_allocs: vec![],

View File

@@ -166,8 +166,8 @@ impl<'a, F: Function> Env<'a, F> {
}
}
let mut half_moves: Vec<HalfMove> = Vec::with_capacity(6 * self.func.insts());
let mut reuse_input_insts = Vec::with_capacity(self.func.insts() / 2);
let mut half_moves: Vec<HalfMove> = Vec::with_capacity(6 * self.func.num_insts());
let mut reuse_input_insts = Vec::with_capacity(self.func.num_insts() / 2);
let mut blockparam_in_idx = 0;
let mut blockparam_out_idx = 0;
@@ -290,7 +290,7 @@ impl<'a, F: Function> Env<'a, F> {
// same allocation) and if the vreg is live, add a
// Source half-move.
let mut block = self.cfginfo.insn_block[range.from.inst().index()];
while block.is_valid() && block.index() < self.func.blocks() {
while block.is_valid() && block.index() < self.func.num_blocks() {
if range.to < self.cfginfo.block_exit[block.index()].next() {
break;
}
@@ -376,7 +376,7 @@ impl<'a, F: Function> Env<'a, F> {
if self.cfginfo.block_entry[block.index()] < range.from {
block = block.next();
}
while block.is_valid() && block.index() < self.func.blocks() {
while block.is_valid() && block.index() < self.func.num_blocks() {
if self.cfginfo.block_entry[block.index()] >= range.to {
break;
}
@@ -1114,11 +1114,13 @@ impl<'a, F: Function> Env<'a, F> {
.collect::<Vec<_>>();
assert_eq!(vregs.len(), self.func.block_params(block).len());
assert_eq!(allocs.len(), self.func.block_params(block).len());
self.add_edit(
self.cfginfo.block_entry[block.index()],
InsertMovePrio::BlockParam,
Edit::BlockParams { vregs, allocs },
);
for (vreg, alloc) in vregs.into_iter().zip(allocs.into_iter()) {
self.add_edit(
self.cfginfo.block_entry[block.index()],
InsertMovePrio::BlockParam,
Edit::DefAlloc { alloc, vreg },
);
}
}
// Ensure edits are in sorted ProgPoint order. N.B.: this must
@@ -1139,13 +1141,6 @@ impl<'a, F: Function> Env<'a, F> {
format!("move {} -> {} ({:?})", from, to, to_vreg),
);
}
&Edit::BlockParams {
ref vregs,
ref allocs,
} => {
let s = format!("blockparams vregs:{:?} allocs:{:?}", vregs, allocs);
self.annotate(ProgPoint::from_index(pos), s);
}
&Edit::DefAlloc { alloc, vreg } => {
let s = format!("defalloc {:?} := {:?}", alloc, vreg);
self.annotate(ProgPoint::from_index(pos), s);

View File

@@ -798,7 +798,7 @@ impl<'a, F: Function> Env<'a, F> {
loop {
attempts += 1;
log::trace!("attempt {}, req {:?}", attempts, req);
debug_assert!(attempts < 100 * self.func.insts());
debug_assert!(attempts < 100 * self.func.num_insts());
let (class, fixed_preg) = match req {
Requirement::Fixed(preg) => (preg.class(), Some(preg)),

View File

@@ -37,8 +37,24 @@ pub enum RegClass {
}
/// A physical register. Contains a physical register number and a class.
///
/// The `hw_enc` field contains the physical register number and is in
/// a logically separate index space per class; in other words, Int
/// register 0 is different than Float register 0.
///
/// Because of bit-packed encodings throughout the implementation,
/// `hw_enc` must fit in 5 bits, i.e., at most 32 registers per class.
///
/// The value returned by `index()`, in contrast, is in a single index
/// space shared by all classes, in order to enable uniform reasoning
/// about physical registers. This is done by putting the class bit at
/// the MSB, or equivalently, declaring that indices 0..31 are the 32
/// integer registers and indices 32..63 are the 32 float registers.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PReg(u8, RegClass);
pub struct PReg {
hw_enc: u8,
class: RegClass,
}
impl PReg {
pub const MAX_BITS: usize = 5;
@@ -48,21 +64,31 @@ impl PReg {
/// Create a new PReg. The `hw_enc` value must fit in 5 bits (`MAX_BITS`).
#[inline(always)]
pub const fn new(hw_enc: usize, class: RegClass) -> Self {
PReg(hw_enc as u8, class)
// We don't have const panics yet (rust-lang/rust#85194) so we
// need to use a little indexing trick here. We unfortunately
// can't use the `static-assertions` crate because we need
// this to work both for const `hw_enc` and for runtime
// values.
const HW_ENC_MUST_BE_IN_BOUNDS: &[bool; PReg::MAX + 1] = &[true; PReg::MAX + 1];
let _ = HW_ENC_MUST_BE_IN_BOUNDS[hw_enc];
PReg {
hw_enc: hw_enc as u8,
class,
}
}
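The bounds-check-by-indexing workaround described in the comment above can be demonstrated standalone. A minimal sketch assuming only std; the `LIMIT` and `checked` names are illustrative, not part of the crate:

// Emulate a const assertion without const panics: indexing a
// fixed-size array rejects an out-of-bounds constant at
// const-evaluation time and panics at runtime otherwise.
const LIMIT: usize = 31;

const fn checked(value: usize) -> usize {
    const IN_BOUNDS: &[bool; LIMIT + 1] = &[true; LIMIT + 1];
    let _ = IN_BOUNDS[value];
    value
}

fn main() {
    let _ok = checked(31);            // fine: within bounds
    // const BAD: usize = checked(32); // would fail to const-evaluate
}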
/// The physical register number, as encoded by the ISA for the particular register class.
#[inline(always)]
pub fn hw_enc(self) -> usize {
let hw_enc = self.0 as usize;
debug_assert!(hw_enc <= Self::MAX);
let hw_enc = self.hw_enc as usize;
hw_enc
}
/// The register class.
#[inline(always)]
pub fn class(self) -> RegClass {
self.1
self.class
}
/// Get an index into the (not necessarily contiguous) index space of
@@ -70,7 +96,7 @@ impl PReg {
/// all PRegs and index it efficiently.
#[inline(always)]
pub fn index(self) -> usize {
((self.1 as u8 as usize) << 5) | (self.0 as usize)
((self.class as u8 as usize) << 5) | (self.hw_enc as usize)
}
#[inline(always)]
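As a usage sketch of the packing shown above (assuming the types are imported from the `regalloc2` crate root), the per-class `hw_enc` spaces and the unified `index()` space behave like this:

use regalloc2::{PReg, RegClass};

fn main() {
    // `hw_enc` is per-class: Int register 0 and Float register 0 differ.
    let int0 = PReg::new(0, RegClass::Int);
    let float0 = PReg::new(0, RegClass::Float);
    assert!(int0 != float0);

    // `index()` folds both classes into one space by placing the class
    // bit above the 5-bit `hw_enc`: Int occupies 0..=31, Float 32..=63.
    assert_eq!(int0.index(), 0);
    assert_eq!(float0.index(), 32);
    assert_eq!(PReg::new(7, RegClass::Float).index(), 39);
}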
@@ -115,7 +141,9 @@ impl std::fmt::Display for PReg {
/// A virtual register. Contains a virtual register number and a class.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct VReg(u32);
pub struct VReg {
bits: u32,
}
impl VReg {
pub const MAX_BITS: usize = 20;
@@ -123,19 +151,25 @@ impl VReg {
#[inline(always)]
pub const fn new(virt_reg: usize, class: RegClass) -> Self {
VReg(((virt_reg as u32) << 1) | (class as u8 as u32))
// See comment in `PReg::new()`: we are emulating a const
// assert here until const panics are stable.
const VIRT_REG_MUST_BE_IN_BOUNDS: &[bool; VReg::MAX + 1] = &[true; VReg::MAX + 1];
let _ = VIRT_REG_MUST_BE_IN_BOUNDS[virt_reg];
VReg {
bits: ((virt_reg as u32) << 1) | (class as u8 as u32),
}
}
#[inline(always)]
pub fn vreg(self) -> usize {
let vreg = (self.0 >> 1) as usize;
debug_assert!(vreg <= Self::MAX);
let vreg = (self.bits >> 1) as usize;
vreg
}
#[inline(always)]
pub fn class(self) -> RegClass {
match self.0 & 1 {
match self.bits & 1 {
0 => RegClass::Int,
1 => RegClass::Float,
_ => unreachable!(),
@@ -166,21 +200,25 @@ impl std::fmt::Display for VReg {
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SpillSlot(u32);
pub struct SpillSlot {
bits: u32,
}
impl SpillSlot {
#[inline(always)]
pub fn new(slot: usize, class: RegClass) -> Self {
assert!(slot < (1 << 24));
SpillSlot((slot as u32) | (class as u8 as u32) << 24)
SpillSlot {
bits: (slot as u32) | (class as u8 as u32) << 24,
}
}
#[inline(always)]
pub fn index(self) -> usize {
(self.0 & 0x00ffffff) as usize
(self.bits & 0x00ffffff) as usize
}
#[inline(always)]
pub fn class(self) -> RegClass {
match (self.0 >> 24) as u8 {
match (self.bits >> 24) as u8 {
0 => RegClass::Int,
1 => RegClass::Float,
_ => unreachable!(),
@@ -193,7 +231,7 @@ impl SpillSlot {
#[inline(always)]
pub fn invalid() -> Self {
SpillSlot(0xffff_ffff)
SpillSlot { bits: 0xffff_ffff }
}
#[inline(always)]
pub fn is_invalid(self) -> bool {
@@ -211,17 +249,78 @@ impl std::fmt::Display for SpillSlot {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandConstraint {
/// Any location is fine (register or stack slot).
Any,
/// Operand must be in a register. Register is read-only for Uses.
Reg,
/// Operand must be on the stack.
Stack,
/// Operand must be in a fixed register.
FixedReg(PReg),
/// On defs only: reuse a use's register.
Reuse(usize),
}
impl std::fmt::Display for OperandConstraint {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Any => write!(f, "any"),
Self::Reg => write!(f, "reg"),
Self::Stack => write!(f, "stack"),
Self::FixedReg(preg) => write!(f, "fixed({})", preg),
Self::Reuse(idx) => write!(f, "reuse({})", idx),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandKind {
Def = 0,
Mod = 1,
Use = 2,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandPos {
Before = 0,
After = 1,
}
/// An `Operand` encodes everything about a mention of a register in
/// an instruction: virtual register number, and any constraint that
/// applies to the register at this program point.
///
/// An Operand may be a use or def (this corresponds to `LUse` and
/// `LAllocation` in Ion).
///
/// Generally, regalloc2 considers operands to have their effects at
/// one of two program points that surround an instruction: "Before"
/// or "After". All operands at a given program-point are assigned
/// non-conflicting locations based on their constraints. Each operand
/// has a "kind", one of use/def/mod, corresponding to
/// read/write/read-write, respectively.
///
/// Usually, an instruction's inputs will be uses-at-Before and
/// outputs will be defs-at-After, though there are valid use-cases
/// for other combinations too. For example, a single "instruction"
/// seen by the regalloc that lowers into multiple machine
/// instructions and reads some of its inputs after it starts to write
/// outputs must either make those input(s) uses-at-After or those
/// output(s) defs-at-Before so that the conflict (overlap) is
/// properly accounted for. See comments on the constructors below for
/// more.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Operand {
/// Bit-pack into 32 bits.
///
/// constraint:3 kind:2 pos:1 class:1 preg:5 vreg:20
///
/// where `constraint` is an `OperandConstraint`, `kind` is an
/// `OperandKind`, `pos` is an `OperandPos`, `class` is a
/// `RegClass`, `preg` is a `PReg` or an index for a reused-input
/// constraint, and `vreg` is a vreg index.
bits: u32,
}
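A small usage sketch of these conventions (again assuming imports from the `regalloc2` crate root): typical inputs are uses at "before", typical outputs are defs at "after", and the early-def/late-use constructors cover the overlapping-lowering case described above.

use regalloc2::{Operand, OperandConstraint, OperandKind, OperandPos, RegClass, VReg};

fn main() {
    let v0 = VReg::new(0, RegClass::Int);
    let v1 = VReg::new(1, RegClass::Int);

    // A typical input: read in a register at the "before" point.
    let input = Operand::reg_use(v0);
    assert_eq!(input.kind(), OperandKind::Use);
    assert_eq!(input.pos(), OperandPos::Before);
    assert_eq!(input.constraint(), OperandConstraint::Reg);

    // A typical output: written in a register at the "after" point,
    // so it may share a register with an input that dies here.
    let output = Operand::reg_def(v1);
    assert_eq!(output.kind(), OperandKind::Def);
    assert_eq!(output.pos(), OperandPos::After);
    assert_eq!(output.vreg().vreg(), 1);

    // If a lowering starts writing outputs before it has finished
    // reading inputs, pin them to the other side instead so the live
    // ranges overlap and the conflict is accounted for.
    let late_use = Operand::reg_use_at_end(v0);
    let early_def = Operand::reg_def_at_start(v1);
    assert_eq!(late_use.pos(), OperandPos::After);
    assert_eq!(early_def.pos(), OperandPos::Before);
}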
@@ -259,6 +358,9 @@ impl Operand {
}
}
/// Create an `Operand` that designates a use of a VReg that must
/// be in a register, and that is used at the "before" point,
/// i.e., can be overwritten by a result.
#[inline(always)]
pub fn reg_use(vreg: VReg) -> Self {
Operand::new(
@@ -268,6 +370,10 @@ impl Operand {
OperandPos::Before,
)
}
/// Create an `Operand` that designates a use of a VReg that must
/// be in a register, and that is used up until the "after" point,
/// i.e., must not conflict with any results.
#[inline(always)]
pub fn reg_use_at_end(vreg: VReg) -> Self {
Operand::new(
@@ -277,6 +383,11 @@ impl Operand {
OperandPos::After,
)
}
/// Create an `Operand` that designates a definition of a VReg
/// that must be in a register, and that occurs at the "after"
/// point, i.e. may reuse a register that carried a use into this
/// instruction.
#[inline(always)]
pub fn reg_def(vreg: VReg) -> Self {
Operand::new(
@@ -286,6 +397,11 @@ impl Operand {
OperandPos::After,
)
}
/// Create an `Operand` that designates a definition of a VReg
/// that must be in a register, and that occurs early at the
/// "before" point, i.e., must not conflict with any input to the
/// instruction.
#[inline(always)]
pub fn reg_def_at_start(vreg: VReg) -> Self {
Operand::new(
@@ -295,8 +411,17 @@ impl Operand {
OperandPos::Before,
)
}
/// Create an `Operand` that designates a def (and use) of a
/// temporary *within* the instruction. This register is assumed
/// to be written by the instruction, and will not conflict with
/// any input or output, but should not be used after the
/// instruction completes.
#[inline(always)]
pub fn reg_temp(vreg: VReg) -> Self {
// For now a temp is equivalent to a def-at-start operand,
// which gives the desired semantics but does not enforce the
// "not reused later" constraint.
Operand::new(
vreg,
OperandConstraint::Reg,
@@ -304,6 +429,12 @@ impl Operand {
OperandPos::Before,
)
}
/// Create an `Operand` that designates a def of a vreg that must
/// reuse the register assigned to an input to the
/// instruction. The input is identified by `idx` (i.e., it is the
/// `idx`th `Operand` for the instruction) and must be constrained
/// to a register, i.e., be the result of `Operand::reg_use(vreg)`.
#[inline(always)]
pub fn reg_reuse_def(vreg: VReg, idx: usize) -> Self {
Operand::new(
@@ -313,6 +444,11 @@ impl Operand {
OperandPos::After,
)
}
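For the reuse constraint, a minimal sketch of a two-operand-style instruction (imports from the crate root as above):

use regalloc2::{Operand, OperandConstraint, RegClass, VReg};

fn main() {
    let src = VReg::new(0, RegClass::Int);
    let dst = VReg::new(1, RegClass::Int);

    // Two-operand ISA form (e.g. x86 `add`), where the destination must
    // end up in the same register as the first source: operand 0 is a
    // register use, and the def is constrained to reuse operand 0's
    // register.
    let operands = [Operand::reg_use(src), Operand::reg_reuse_def(dst, 0)];
    assert_eq!(operands[1].constraint(), OperandConstraint::Reuse(0));
}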
/// Create an `Operand` that designates a use of a vreg and
/// ensures that it is placed in the given fixed PReg at the
/// use. The `Allocation` resulting for this operand is guaranteed
/// to be `preg`.
#[inline(always)]
pub fn reg_fixed_use(vreg: VReg, preg: PReg) -> Self {
Operand::new(
@@ -322,6 +458,11 @@ impl Operand {
OperandPos::Before,
)
}
/// Create an `Operand` that designates a def of a vreg and
/// ensures that it is placed in the given fixed PReg at the
/// def. The `Allocation` resulting for this operand is guaranteed
/// to be `preg`.
#[inline(always)]
pub fn reg_fixed_def(vreg: VReg, preg: PReg) -> Self {
Operand::new(
@@ -332,12 +473,17 @@ impl Operand {
)
}
/// Get the virtual register designated by an operand. Every
/// operand must name some virtual register, even if it constrains
/// the operand to a fixed physical register as well; the vregs
/// are used to track dataflow.
#[inline(always)]
pub fn vreg(self) -> VReg {
let vreg_idx = ((self.bits as usize) & VReg::MAX) as usize;
VReg::new(vreg_idx, self.class())
}
/// Get the register class used by this operand.
#[inline(always)]
pub fn class(self) -> RegClass {
let class_field = (self.bits >> 25) & 1;
@@ -348,6 +494,8 @@ impl Operand {
}
}
/// Get the "kind" of this operand: a definition (write), a use
/// (read), or a "mod" / modify (a read followed by a write).
#[inline(always)]
pub fn kind(self) -> OperandKind {
let kind_field = (self.bits >> 27) & 3;
@@ -359,6 +507,10 @@ impl Operand {
}
}
/// Get the "position" of this operand, i.e., where its read
/// and/or write occurs: either before the instruction executes,
/// or after it does. Ordinarily, uses occur at "before" and defs
/// at "after", though there are cases where this is not true.
#[inline(always)]
pub fn pos(self) -> OperandPos {
let pos_field = (self.bits >> 26) & 1;
@@ -369,6 +521,8 @@ impl Operand {
}
}
/// Get the "constraint" of this operand, i.e., what requirements
/// its allocation must fulfill.
#[inline(always)]
pub fn constraint(self) -> OperandConstraint {
let constraint_field = (self.bits >> 29) & 7;
@@ -383,11 +537,14 @@ impl Operand {
}
}
/// Get the raw 32-bit encoding of this operand's fields.
#[inline(always)]
pub fn bits(self) -> u32 {
self.bits
}
/// Construct an `Operand` from the raw 32-bit encoding returned
/// from `bits()`.
#[inline(always)]
pub fn from_bits(bits: u32) -> Self {
debug_assert!(bits >> 29 <= 4);
@@ -425,45 +582,6 @@ impl std::fmt::Display for Operand {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandConstraint {
/// Any location is fine (register or stack slot).
Any,
/// Operand must be in a register. Register is read-only for Uses.
Reg,
/// Operand must be on the stack.
Stack,
/// Operand must be in a fixed register.
FixedReg(PReg),
/// On defs only: reuse a use's register. Which use is given by `preg` field.
Reuse(usize),
}
impl std::fmt::Display for OperandConstraint {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Any => write!(f, "any"),
Self::Reg => write!(f, "reg"),
Self::Stack => write!(f, "stack"),
Self::FixedReg(preg) => write!(f, "fixed({})", preg),
Self::Reuse(idx) => write!(f, "reuse({})", idx),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandKind {
Def = 0,
Mod = 1,
Use = 2,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandPos {
Before = 0,
After = 1,
}
/// An Allocation represents the end result of regalloc for an
/// Operand.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -511,7 +629,7 @@ impl Allocation {
#[inline(always)]
pub fn stack(slot: SpillSlot) -> Allocation {
Allocation::new(AllocationKind::Stack, slot.0 as usize)
Allocation::new(AllocationKind::Stack, slot.bits as usize)
}
#[inline(always)]
@@ -556,7 +674,9 @@ impl Allocation {
#[inline(always)]
pub fn as_stack(self) -> Option<SpillSlot> {
if self.kind() == AllocationKind::Stack {
Some(SpillSlot(self.index() as u32))
Some(SpillSlot {
bits: self.index() as u32,
})
} else {
None
}
@@ -604,10 +724,10 @@ pub trait Function {
// -------------
/// How many instructions are there?
fn insts(&self) -> usize;
fn num_insts(&self) -> usize;
/// How many blocks are there?
fn blocks(&self) -> usize;
fn num_blocks(&self) -> usize;
/// Get the index of the entry block.
fn entry_block(&self) -> Block;
@@ -649,6 +769,16 @@ pub trait Function {
fn branch_blockparam_arg_offset(&self, block: Block, insn: Inst) -> usize;
/// Determine whether an instruction is a safepoint and requires a stackmap.
///
/// Strictly speaking, these two parts (is a safepoint, requires a
/// stackmap) are orthogonal. An instruction could want to see a
/// stackmap of refs on the stack (without forcing them), or it
/// could want all refs to be on the stack (without knowing where
/// they are). Only the latter strictly follows from "is a
/// safepoint". But in practice, both are true at the same time,
/// so we combine the two notions: for regalloc2, a "safepoint
/// instruction" is one that both forces refs onto the stack, and
/// provides a stackmap indicating where they are.
fn is_safepoint(&self, _: Inst) -> bool {
false
}
@@ -664,7 +794,16 @@ pub trait Function {
/// Get the Operands for an instruction.
fn inst_operands(&self, insn: Inst) -> &[Operand];
/// Get the clobbers for an instruction.
/// Get the clobbers for an instruction; these are the registers
/// that the instruction is known to overwrite, separate from its
/// outputs described by its `Operand`s. This can be used to, for
/// example, describe ABI-specified registers that are not
/// preserved by a call instruction, or fixed physical registers
/// written by an instruction but not used as a vreg output, or
/// fixed physical registers used as temps within an instruction
/// out of necessity. Every register written to by an instruction
/// must either be described by an Operand of kind `Def` or `Mod`,
/// or else must be a "clobber".
fn inst_clobbers(&self, insn: Inst) -> &[PReg];
/// Get the number of `VReg` in use in this function.
@@ -743,7 +882,15 @@ pub trait Function {
}
}
/// A position before or after an instruction.
/// A position before or after an instruction at which we can make an
/// edit.
///
/// Note that this differs from `OperandPos`: `OperandPos` describes
/// a constraint on an operand, while `InstPosition` describes a
/// program point. `OperandPos` could grow more options in
/// the future, for example if we decide that an "early write" or
/// "late read" phase makes sense, while `InstPosition` will always
/// describe these two insertion points.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u8)]
pub enum InstPosition {
@@ -839,13 +986,7 @@ pub enum Edit {
to: Allocation,
to_vreg: Option<VReg>,
},
/// Define blockparams' locations. Note that this is not typically
/// turned into machine code, but can be useful metadata (e.g. for
/// the checker).
BlockParams {
vregs: Vec<VReg>,
allocs: Vec<Allocation>,
},
/// Define a particular Allocation to contain a particular VReg. Useful
/// for the checker.
DefAlloc { alloc: Allocation, vreg: VReg },
@@ -859,16 +1000,20 @@ pub enum Edit {
pub struct MachineEnv {
/// Physical registers. Every register that might be mentioned in
/// any constraint must be listed here, even if it is not
/// allocatable under normal conditions.
/// allocatable (i.e., not present in one of
/// `{preferred,non_preferred}_regs_by_class`).
pub regs: Vec<PReg>,
/// Preferred physical registers for each class. These are the
/// registers that will be allocated first, if free.
pub preferred_regs_by_class: [Vec<PReg>; 2],
/// Non-preferred physical registers for each class. These are the
/// registers that will be allocated if a preferred register is
/// not available; using one of these is considered suboptimal,
/// but still better than spilling.
pub non_preferred_regs_by_class: [Vec<PReg>; 2],
/// One scratch register per class. This is needed to perform
/// moves between registers when cyclic move patterns occur. The
/// register should not be placed in either the preferred or
@@ -888,12 +1033,15 @@ pub struct MachineEnv {
pub struct Output {
/// How many spillslots are needed in the frame?
pub num_spillslots: usize,
/// Edits (insertions or removals). Guaranteed to be sorted by
/// program point.
pub edits: Vec<(ProgPoint, Edit)>,
/// Allocations for each operand. The mapping from instruction to
/// its allocations is given by `inst_alloc_offsets` below.
pub allocs: Vec<Allocation>,
/// Allocation offset in `allocs` for each instruction.
pub inst_alloc_offsets: Vec<u32>,

View File

@@ -16,7 +16,7 @@ pub fn validate_ssa<F: Function>(f: &F, cfginfo: &CFGInfo) -> Result<(), RegAllo
// dominates this one. Also check that for every block param and
// inst def, that this is the only def.
let mut defined = vec![false; f.num_vregs()];
for block in 0..f.blocks() {
for block in 0..f.num_blocks() {
let block = Block::new(block);
for blockparam in f.block_params(block) {
if defined[blockparam.vreg()] {
@@ -62,7 +62,7 @@ pub fn validate_ssa<F: Function>(f: &F, cfginfo: &CFGInfo) -> Result<(), RegAllo
// number of blockparams in their succs, and that the end of every
// block ends in this branch or in a ret, and that there are no
// other branches or rets in the middle of the block.
for block in 0..f.blocks() {
for block in 0..f.num_blocks() {
let block = Block::new(block);
let insns = f.block_insns(block);
for insn in insns.iter() {