Remove pinned VRegs. (#108)

Chris Fallin
2023-01-24 17:31:41 -08:00
committed by GitHub
parent e41c6140de
commit e09f6519a6
6 changed files with 222 additions and 576 deletions


@@ -366,25 +366,8 @@ impl CheckerState {
}
}
fn initial_with_pinned_vregs<F: Function>(f: &F) -> CheckerState {
// Scan the function, looking for all vregs that are pinned
// vregs, gathering them with their PRegs.
let mut pinned_vregs: FxHashMap<VReg, PReg> = FxHashMap::default();
visit_all_vregs(f, |vreg: VReg| {
if let Some(preg) = f.is_pinned_vreg(vreg) {
pinned_vregs.insert(vreg, preg);
}
});
let mut allocs = FxHashMap::default();
for (vreg, preg) in pinned_vregs {
allocs.insert(
Allocation::reg(preg),
CheckerValue::VRegs(std::iter::once(vreg).collect()),
);
}
CheckerState::Allocations(allocs)
fn initial() -> Self {
CheckerState::Allocations(FxHashMap::default())
}
}
@@ -857,7 +840,7 @@ impl<'a, F: Function> Checker<'a, F> {
reftyped_vregs.insert(vreg);
}
bb_in.insert(f.entry_block(), CheckerState::initial_with_pinned_vregs(f));
bb_in.insert(f.entry_block(), CheckerState::default());
let mut stack_pregs = PRegSet::empty();
for &preg in &machine_env.fixed_stack_slots {
@@ -932,21 +915,11 @@ impl<'a, F: Function> Checker<'a, F> {
// move/edit framework, so we don't get allocs for these moves
// in the post-regalloc output, and the embedder is not
// supposed to emit the moves. But we *do* want to check the
// semantic implications, namely definition of new vregs and,
// for moves to/from pinned vregs, the implied register
// constraints. So we emit `ProgramMove` ops that do just
// this.
// semantic implications, namely definition of new vregs. So
// we emit `ProgramMove` ops that do just this.
if let Some((src, dst)) = self.f.is_move(inst) {
let src_preg = self.f.is_pinned_vreg(src.vreg());
let src_op = match src_preg {
Some(preg) => Operand::reg_fixed_use(src.vreg(), preg),
None => Operand::any_use(src.vreg()),
};
let dst_preg = self.f.is_pinned_vreg(dst.vreg());
let dst_op = match dst_preg {
Some(preg) => Operand::reg_fixed_def(dst.vreg(), preg),
None => Operand::any_def(dst.vreg()),
};
let src_op = Operand::any_use(src.vreg());
let dst_op = Operand::any_def(dst.vreg());
let checkinst = CheckerInst::ProgramMove {
inst,
src: src_op,
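With pinned vregs gone, the checker treats a program move purely as value flow: the destination vreg is defined from whatever the source allocation symbolically holds, with no implied register constraint. Below is a minimal standalone sketch of this style of symbolic state; the types are simplified stand-ins, not the crate's actual `Allocation`/`CheckerValue` definitions.

    use std::collections::{HashMap, HashSet};

    // Simplified stand-ins for the crate's Allocation and VReg types.
    type Alloc = u32;
    type VReg = u32;

    // Symbolic checker state: the set of vregs each allocation may hold.
    struct State(HashMap<Alloc, HashSet<VReg>>);

    impl State {
        // A move copies the source's symbolic value set to the
        // destination allocation; it introduces no new constraints.
        fn apply_move(&mut self, from: Alloc, to: Alloc) {
            let vals = self.0.get(&from).cloned().unwrap_or_default();
            self.0.insert(to, vals);
        }

        // A def overwrites the destination with exactly one vreg.
        fn apply_def(&mut self, to: Alloc, vreg: VReg) {
            self.0.insert(to, std::iter::once(vreg).collect());
        }
    }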


@@ -140,6 +140,13 @@ impl AdaptiveMap {
&Self::Large(ref map) => AdaptiveMapIter::Large(map.iter()),
}
}
fn is_empty(&self) -> bool {
match self {
AdaptiveMap::Small { values, .. } => values.iter().all(|&value| value == 0),
AdaptiveMap::Large(m) => m.values().all(|&value| value == 0),
}
}
}
enum AdaptiveMapIter<'a> {
@@ -268,6 +275,11 @@ impl IndexSet {
_ => false,
}
}
/// Is the set empty?
pub(crate) fn is_empty(&self) -> bool {
self.elems.is_empty()
}
}
pub struct SetBitsIter(u64);
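A design note on the two `is_empty` implementations above: they inspect stored values rather than entry counts, because clearing bits can leave an entry behind whose 64-bit word is zero. A self-contained sketch of the same invariant, using a plain `HashMap` in place of the small/large split:

    use std::collections::HashMap;

    // A set of u32s stored as 64-bit bitmap words keyed by word index.
    struct BitWords(HashMap<u32, u64>);

    impl BitWords {
        fn remove(&mut self, elem: u32) {
            // Clearing a bit may leave a zero-valued word behind...
            if let Some(word) = self.0.get_mut(&(elem / 64)) {
                *word &= !(1u64 << (elem % 64));
            }
        }

        fn is_empty(&self) -> bool {
            // ...so emptiness must check values, not the number of entries.
            self.0.values().all(|&word| word == 0)
        }
    }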


@@ -13,9 +13,9 @@
//! Live-range computation.
use super::{
CodeRange, Env, InsertMovePrio, LiveBundle, LiveBundleIndex, LiveRange, LiveRangeFlag,
LiveRangeIndex, LiveRangeKey, LiveRangeListEntry, LiveRangeSet, PRegData, PRegIndex, RegClass,
SpillSetIndex, Use, VRegData, VRegIndex, SLOT_NONE,
CodeRange, Env, LiveBundle, LiveBundleIndex, LiveRange, LiveRangeFlag, LiveRangeIndex,
LiveRangeKey, LiveRangeListEntry, LiveRangeSet, PRegData, PRegIndex, RegClass, SpillSetIndex,
Use, VRegData, VRegIndex, SLOT_NONE,
};
use crate::indexset::IndexSet;
use crate::ion::data_structures::{
@@ -403,15 +403,13 @@ impl<'a, F: Function> Env<'a, F> {
self.liveins[block.index()] = live;
}
// Check that there are no liveins to the entry block, except
// for pinned vregs. (The client should create a virtual
// instruction that defines any other liveins if necessary.)
for livein in self.liveins[self.func.entry_block().index()].iter() {
let livein = self.vreg(VRegIndex::new(livein));
if self.func.is_pinned_vreg(livein).is_none() {
trace!("non-pinned-vreg livein to entry block: {}", livein);
return Err(RegAllocError::EntryLivein);
}
// Check that there are no liveins to the entry block.
if !self.liveins[self.func.entry_block().index()].is_empty() {
trace!(
"non-empty liveins to entry block: {:?}",
self.liveins[self.func.entry_block().index()]
);
return Err(RegAllocError::EntryLivein);
}
Ok(())
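A client that previously relied on pinned vregs for values live at function entry must now satisfy this invariant by defining each such value explicitly, e.g. with a virtual instruction in the entry block as the removed comment suggested. A hedged sketch of that client-side pattern follows; `FuncBuilder`, `Inst::ArgDef`, and the types below are hypothetical stand-ins, not regalloc2 API.

    struct VReg(u32);
    struct PReg(u32);

    enum Inst {
        // Hypothetical virtual entry instruction: defines `vreg` with a
        // fixed-register constraint tying the def to `preg`.
        ArgDef { vreg: VReg, preg: PReg },
    }

    struct FuncBuilder {
        entry_insts: Vec<Inst>,
    }

    impl FuncBuilder {
        // Give every function-entry value an explicit def so that no
        // vreg is live-in to the entry block.
        fn define_entry_values(&mut self, incoming: Vec<(VReg, PReg)>) {
            for (vreg, preg) in incoming {
                self.entry_insts.push(Inst::ArgDef { vreg, preg });
            }
        }
    }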
@@ -535,392 +533,128 @@ impl<'a, F: Function> Env<'a, F> {
debug_assert_eq!(dst.kind(), OperandKind::Def);
debug_assert_eq!(dst.pos(), OperandPos::Late);
let src_pinned = self.func.is_pinned_vreg(src.vreg());
let dst_pinned = self.func.is_pinned_vreg(dst.vreg());
// Redefine src and dst operands to have
// positions of After and Before respectively
// (see note below), and to have Any
// constraints if they were originally Reg.
let src_constraint = match src.constraint() {
OperandConstraint::Reg => OperandConstraint::Any,
x => x,
};
let dst_constraint = match dst.constraint() {
OperandConstraint::Reg => OperandConstraint::Any,
x => x,
};
let src = Operand::new(
src.vreg(),
src_constraint,
OperandKind::Use,
OperandPos::Late,
);
let dst = Operand::new(
dst.vreg(),
dst_constraint,
OperandKind::Def,
OperandPos::Early,
);
match (src_pinned, dst_pinned) {
// If both src and dest are pinned, emit
// the move right here, right now.
(Some(src_preg), Some(dst_preg)) => {
// Update LRs.
if !live.get(src.vreg().vreg()) {
let lr = self.add_liverange_to_vreg(
VRegIndex::new(src.vreg().vreg()),
CodeRange {
from: self.cfginfo.block_entry[block.index()],
to: ProgPoint::after(inst),
},
);
live.set(src.vreg().vreg(), true);
vreg_ranges[src.vreg().vreg()] = lr;
}
if live.get(dst.vreg().vreg()) {
let lr = vreg_ranges[dst.vreg().vreg()];
self.ranges[lr.index()].range.from = ProgPoint::after(inst);
live.set(dst.vreg().vreg(), false);
} else {
self.add_liverange_to_vreg(
VRegIndex::new(dst.vreg().vreg()),
CodeRange {
from: ProgPoint::after(inst),
to: ProgPoint::before(inst.next()),
},
);
}
self.insert_move(
ProgPoint::before(inst),
InsertMovePrio::MultiFixedRegInitial,
Allocation::reg(src_preg),
Allocation::reg(dst_preg),
dst.vreg(),
);
}
// If exactly one of source and dest (but
// not both) is a pinned-vreg, convert
// this into a ghost use on the other vreg
// with a FixedReg constraint.
(Some(preg), None) | (None, Some(preg)) => {
trace!(
" -> exactly one of src/dst is pinned; converting to ghost use"
);
let (vreg, pinned_vreg, kind, pos, progpoint) =
if src_pinned.is_some() {
// Source is pinned: this is a def on the dst with a pinned preg.
(
dst.vreg(),
src.vreg(),
OperandKind::Def,
OperandPos::Late,
ProgPoint::before(inst),
)
} else {
// Dest is pinned: this is a use on the src with a pinned preg.
(
src.vreg(),
dst.vreg(),
OperandKind::Use,
OperandPos::Early,
ProgPoint::after(inst),
)
};
let constraint = OperandConstraint::FixedReg(preg);
let operand = Operand::new(vreg, constraint, kind, pos);
trace!(
concat!(
" -> preg {:?} vreg {:?} kind {:?} ",
"pos {:?} progpoint {:?} constraint {:?} operand {:?}"
),
preg,
vreg,
kind,
pos,
progpoint,
constraint,
operand
);
// Get the LR for the vreg; if none, create one.
let mut lr = vreg_ranges[vreg.vreg()];
if !live.get(vreg.vreg()) {
let from = match kind {
OperandKind::Use => self.cfginfo.block_entry[block.index()],
OperandKind::Def => progpoint,
_ => unreachable!(),
};
let to = progpoint.next();
lr = self.add_liverange_to_vreg(
VRegIndex::new(vreg.vreg()),
CodeRange { from, to },
);
trace!(" -> dead; created LR");
}
trace!(" -> LR {:?}", lr);
self.insert_use_into_liverange(
lr,
Use::new(operand, progpoint, SLOT_NONE),
);
if kind == OperandKind::Def {
live.set(vreg.vreg(), false);
if self.ranges[lr.index()].range.from
== self.cfginfo.block_entry[block.index()]
{
self.ranges[lr.index()].range.from = progpoint;
}
self.ranges[lr.index()].set_flag(LiveRangeFlag::StartsAtDef);
} else {
live.set(vreg.vreg(), true);
vreg_ranges[vreg.vreg()] = lr;
}
// Handle liveness of the other vreg. Note
// that this is somewhat special. For the
// destination case, we want the pinned
// vreg's LR to start just *after* the
// operand we inserted above, because
// otherwise it would overlap, and
// interfere, and prevent allocation. For
// the source case, we want to "poke a
// hole" in the LR: if it's live going
// downward, end it just after the operand
// and restart it before; if it isn't
// (this is the last use), start it
// before.
if kind == OperandKind::Def {
trace!(" -> src on pinned vreg {:?}", pinned_vreg);
// The *other* vreg is a def, so the pinned-vreg
// mention is a use. If already live,
// end the existing LR just *after*
// the `progpoint` defined above and
// start a new one just *before* the
// `progpoint` defined above,
// preserving the start. If not, start
// a new one live back to the top of
// the block, starting just before
// `progpoint`.
if live.get(pinned_vreg.vreg()) {
let pinned_lr = vreg_ranges[pinned_vreg.vreg()];
let orig_start = self.ranges[pinned_lr.index()].range.from;
// Following instruction start
// (so we don't transition in
// middle of inst).
let new_start = ProgPoint::before(progpoint.inst().next());
trace!(
" -> live with LR {:?}; truncating to start at {:?}",
pinned_lr,
new_start,
);
self.ranges[pinned_lr.index()].range.from = new_start;
let new_lr = self.add_liverange_to_vreg(
VRegIndex::new(pinned_vreg.vreg()),
CodeRange {
from: orig_start,
to: progpoint,
},
);
vreg_ranges[pinned_vreg.vreg()] = new_lr;
trace!(" -> created LR {:?} with remaining range from {:?} to {:?}", new_lr, orig_start, progpoint);
// Add an edit right now to indicate that at
// this program point, the given
// preg is now known as that vreg,
// not the preg, but immediately
// after, it is known as the preg
// again. This is used by the
// checker.
self.insert_move(
ProgPoint::after(inst),
InsertMovePrio::Regular,
Allocation::reg(preg),
Allocation::reg(preg),
dst.vreg(),
);
self.insert_move(
ProgPoint::before(inst.next()),
InsertMovePrio::MultiFixedRegInitial,
Allocation::reg(preg),
Allocation::reg(preg),
src.vreg(),
);
} else {
if inst > self.cfginfo.block_entry[block.index()].inst() {
let new_lr = self.add_liverange_to_vreg(
VRegIndex::new(pinned_vreg.vreg()),
CodeRange {
from: self.cfginfo.block_entry[block.index()],
to: progpoint,
},
);
vreg_ranges[pinned_vreg.vreg()] = new_lr;
live.set(pinned_vreg.vreg(), true);
trace!(" -> was not live; created new LR {:?}", new_lr);
}
// Add an edit right now to indicate that at
// this program point, the given
// preg is now known as that vreg,
// not the preg. This is used by
// the checker.
self.insert_move(
ProgPoint::after(inst),
InsertMovePrio::BlockParam,
Allocation::reg(preg),
Allocation::reg(preg),
dst.vreg(),
);
}
} else {
trace!(" -> dst on pinned vreg {:?}", pinned_vreg);
// The *other* vreg is a use, so the pinned-vreg
// mention is a def. Truncate its LR
// just *after* the `progpoint`
// defined above.
if live.get(pinned_vreg.vreg()) {
let pinned_lr = vreg_ranges[pinned_vreg.vreg()];
self.ranges[pinned_lr.index()].range.from =
progpoint.next();
trace!(
" -> was live with LR {:?}; truncated start to {:?}",
pinned_lr,
progpoint.next()
);
live.set(pinned_vreg.vreg(), false);
// Add a no-op edit right now to indicate that
// at this program point, the
// given preg is now known as that
// preg, not the vreg. This is
// used by the checker.
self.insert_move(
ProgPoint::before(inst.next()),
InsertMovePrio::PostRegular,
Allocation::reg(preg),
Allocation::reg(preg),
dst.vreg(),
);
}
// Otherwise, if dead, no need to create
// a dummy LR -- there is no
// reservation to make (the other vreg
// will land in the reg with the
// fixed-reg operand constraint, but
// it's a dead move anyway).
}
}
// Ordinary move between two non-pinned vregs.
(None, None) => {
if self.annotations_enabled {
    self.annotate(
        ProgPoint::after(inst),
        format!(
            " prog-move v{} ({:?}) -> v{} ({:?})",
            src.vreg().vreg(),
            src_constraint,
            dst.vreg().vreg(),
            dst_constraint,
        ),
    );
}
// N.B.: in order to integrate with the move
// resolution that joins LRs in general, we
// conceptually treat the move as happening
// between the move inst's After and the next
// inst's Before. Thus the src LR goes up to
// (exclusive) next-inst-pre, and the dst LR
// starts at next-inst-pre. We have to take
// care in our move insertion to handle this
// like other inter-inst moves, i.e., at
// `Regular` priority, so it properly happens
// in parallel with other inter-LR moves.
//
// Why the progpoint between move and next
// inst, and not the progpoint between prev
// inst and move? Because a move can be the
// first inst in a block, but cannot be the
// last; so the following progpoint is always
// within the same block, while the previous
// one may be an inter-block point (and the
// After of the prev inst in a different
// block).
// Handle the def w.r.t. liveranges: trim the
// start of the range and mark it dead at this
// point in our backward scan.
let pos = ProgPoint::before(inst.next());
let mut dst_lr = vreg_ranges[dst.vreg().vreg()];
if !live.get(dst.vreg().vreg()) {
let from = pos;
let to = pos.next();
dst_lr = self.add_liverange_to_vreg(
VRegIndex::new(dst.vreg().vreg()),
CodeRange { from, to },
);
trace!(" -> invalid LR for def; created {:?}", dst_lr);
}
trace!(" -> has existing LR {:?}", dst_lr);
// Trim the LR to start here.
if self.ranges[dst_lr.index()].range.from
== self.cfginfo.block_entry[block.index()]
{
trace!(" -> started at block start; trimming to {:?}", pos);
self.ranges[dst_lr.index()].range.from = pos;
}
self.ranges[dst_lr.index()].set_flag(LiveRangeFlag::StartsAtDef);
live.set(dst.vreg().vreg(), false);
vreg_ranges[dst.vreg().vreg()] = LiveRangeIndex::invalid();
// Handle the use w.r.t. liveranges: make it live
// and create an initial LR back to the start of
// the block.
let pos = ProgPoint::after(inst);
let src_lr = if !live.get(src.vreg().vreg()) {
let range = CodeRange {
from: self.cfginfo.block_entry[block.index()],
to: pos.next(),
};
let src_lr = self
.add_liverange_to_vreg(VRegIndex::new(src.vreg().vreg()), range);
vreg_ranges[src.vreg().vreg()] = src_lr;
src_lr
} else {
vreg_ranges[src.vreg().vreg()]
};
trace!(" -> src LR {:?}", src_lr);
trace!(" -> src LR {:?}", src_lr);
// Add to live-set.
let src_is_dead_after_move = !live.get(src.vreg().vreg());
live.set(src.vreg().vreg(), true);
// Add to program-moves lists.
self.prog_move_srcs.push((
(VRegIndex::new(src.vreg().vreg()), inst),
Allocation::none(),
));
self.prog_move_dsts.push((
(VRegIndex::new(dst.vreg().vreg()), inst.next()),
Allocation::none(),
));
self.stats.prog_moves += 1;
if src_is_dead_after_move {
self.stats.prog_moves_dead_src += 1;
self.prog_move_merges.push((src_lr, dst_lr));
}
}
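The N.B. comment above relies on program-point arithmetic: each instruction has a Before and an After point, and the point after an instruction's After is the next instruction's Before. A small sketch of that arithmetic, mirroring how `ProgPoint` is used here (the exact representation is an assumption):

    // Each instruction i has two program points: Before = 2*i and
    // After = 2*i + 1, so points order linearly across instructions.
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    struct Point(u32);

    impl Point {
        fn before(inst: u32) -> Point { Point(inst * 2) }
        fn after(inst: u32) -> Point { Point(inst * 2 + 1) }
        fn next(self) -> Point { Point(self.0 + 1) }
    }

    // For a program move at `inst`, the src LR ends (exclusively) at the
    // next instruction's Before, exactly where the dst LR begins.
    fn prog_move_boundary(inst: u32) -> Point {
        let boundary = Point::before(inst + 1);
        debug_assert_eq!(Point::after(inst).next(), boundary);
        boundary
    }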
@@ -1240,9 +974,6 @@ impl<'a, F: Function> Env<'a, F> {
// Insert safepoint virtual stack uses, if needed.
for &vreg in self.func.reftype_vregs() {
if self.func.is_pinned_vreg(vreg).is_some() {
continue;
}
let vreg = VRegIndex::new(vreg.vreg());
let mut inserted = false;
let mut safepoint_idx = 0;


@@ -13,8 +13,7 @@
//! Bundle merging.
use super::{
Env, LiveBundleIndex, LiveRangeIndex, LiveRangeKey, SpillSet, SpillSetIndex, SpillSlotIndex,
VRegIndex,
Env, LiveBundleIndex, LiveRangeIndex, SpillSet, SpillSetIndex, SpillSlotIndex, VRegIndex,
};
use crate::{
ion::data_structures::BlockparamOut, Function, Inst, OperandConstraint, OperandKind, PReg,
@@ -245,19 +244,6 @@ impl<'a, F: Function> Env<'a, F> {
continue;
}
// If this is a pinned vreg, go ahead and add it to the
// commitment map, and avoid creating a bundle entirely.
if let Some(preg) = self.func.is_pinned_vreg(self.vreg(vreg)) {
for entry in &self.vregs[vreg.index()].ranges {
let key = LiveRangeKey::from_range(&entry.range);
self.pregs[preg.index()]
.allocations
.btree
.insert(key, LiveRangeIndex::invalid());
}
continue;
}
let bundle = self.create_bundle();
self.bundles[bundle.index()].ranges = self.vregs[vreg.index()].ranges.clone();
trace!("vreg v{} gets bundle{}", vreg.index(), bundle.index());
@@ -325,11 +311,6 @@ impl<'a, F: Function> Env<'a, F> {
if let OperandConstraint::Reuse(reuse_idx) = op.constraint() {
let src_vreg = op.vreg();
let dst_vreg = self.func.inst_operands(inst)[reuse_idx].vreg();
if self.func.is_pinned_vreg(src_vreg).is_some()
|| self.func.is_pinned_vreg(dst_vreg).is_some()
{
continue;
}
trace!(
"trying to merge reused-input def: src {} to dst {}",
@@ -382,26 +363,6 @@ impl<'a, F: Function> Env<'a, F> {
dst
);
let dst_vreg = self.vreg(self.ranges[dst.index()].vreg);
let src_vreg = self.vreg(self.ranges[src.index()].vreg);
if self.func.is_pinned_vreg(src_vreg).is_some()
&& self.func.is_pinned_vreg(dst_vreg).is_some()
{
continue;
}
if let Some(preg) = self.func.is_pinned_vreg(src_vreg) {
let dest_bundle = self.ranges[dst.index()].bundle;
let spillset = self.bundles[dest_bundle.index()].spillset;
self.spillsets[spillset.index()].reg_hint = preg;
continue;
}
if let Some(preg) = self.func.is_pinned_vreg(dst_vreg) {
let src_bundle = self.ranges[src.index()].bundle;
let spillset = self.bundles[src_bundle.index()].spillset;
self.spillsets[spillset.index()].reg_hint = preg;
continue;
}
let src_bundle = self.ranges[src.index()].bundle;
debug_assert!(src_bundle.is_valid());
let dest_bundle = self.ranges[dst.index()].bundle;


@@ -187,8 +187,6 @@ impl<'a, F: Function> Env<'a, F> {
continue;
}
let pinned_alloc = self.func.is_pinned_vreg(self.vreg(vreg));
// For each range in each vreg, insert moves or
// half-moves. We also scan over `blockparam_ins` and
// `blockparam_outs`, which are sorted by (block, vreg),
@@ -196,17 +194,14 @@ impl<'a, F: Function> Env<'a, F> {
let mut prev = LiveRangeIndex::invalid();
for range_idx in 0..self.vregs[vreg.index()].ranges.len() {
let entry = self.vregs[vreg.index()].ranges[range_idx];
let alloc = pinned_alloc
.map(|preg| Allocation::reg(preg))
.unwrap_or_else(|| self.get_alloc_for_range(entry.index));
let alloc = self.get_alloc_for_range(entry.index);
let range = entry.range;
trace!(
"apply_allocations: vreg {:?} LR {:?} with range {:?} has alloc {:?} (pinned {:?})",
"apply_allocations: vreg {:?} LR {:?} with range {:?} has alloc {:?}",
vreg,
entry.index,
range,
alloc,
pinned_alloc,
);
debug_assert!(alloc != Allocation::none());
@@ -253,10 +248,7 @@ impl<'a, F: Function> Env<'a, F> {
// can't insert a move that logically happens just
// before After (i.e. in the middle of a single
// instruction).
//
// Also note that this case is not applicable to
// pinned vregs (because they are always in one PReg).
if pinned_alloc.is_none() && prev.is_valid() {
if prev.is_valid() {
let prev_alloc = self.get_alloc_for_range(prev);
let prev_range = self.ranges[prev.index()].range;
let first_is_def =
@@ -286,99 +278,93 @@ impl<'a, F: Function> Env<'a, F> {
}
}
// The block-to-block edge-move logic is not
// applicable to pinned vregs, which are always in one
// PReg (so never need moves within their own vreg
// ranges).
if pinned_alloc.is_none() {
    // Scan over blocks whose ends are covered by this
    // range. For each, for each successor that is not
    // already in this range (hence guaranteed to have the
    // same allocation) and if the vreg is live, add a
    // Source half-move.
    let mut block = self.cfginfo.insn_block[range.from.inst().index()];
    while block.is_valid() && block.index() < self.func.num_blocks() {
        if range.to < self.cfginfo.block_exit[block.index()].next() {
            break;
        }
        trace!("examining block with end in range: block{}", block.index());
        for &succ in self.func.block_succs(block) {
            trace!(
                " -> has succ block {} with entry {:?}",
                succ.index(),
                self.cfginfo.block_entry[succ.index()]
            );
            if range.contains_point(self.cfginfo.block_entry[succ.index()]) {
                continue;
            }
            trace!(" -> out of this range, requires half-move if live");
            if self.is_live_in(succ, vreg) {
                trace!(" -> live at input to succ, adding halfmove");
                half_moves.push(HalfMove {
                    key: half_move_key(block, succ, vreg, HalfMoveKind::Source),
                    alloc,
                });
            }
        }

        // Scan forward in `blockparam_outs`, adding all
        // half-moves for outgoing values to blockparams
        // in succs.
        trace!(
            "scanning blockparam_outs for v{} block{}: blockparam_out_idx = {}",
            vreg.index(),
            block.index(),
            blockparam_out_idx,
        );
        while blockparam_out_idx < self.blockparam_outs.len() {
            let BlockparamOut {
                from_vreg,
                from_block,
                to_block,
                to_vreg,
            } = self.blockparam_outs[blockparam_out_idx];
            if (from_vreg, from_block) > (vreg, block) {
                break;
            }
            if (from_vreg, from_block) == (vreg, block) {
                trace!(
                    " -> found: from v{} block{} to v{} block{}",
                    from_vreg.index(),
                    from_block.index(),
                    to_vreg.index(),
                    to_block.index()
                );
                half_moves.push(HalfMove {
                    key: half_move_key(
                        from_block,
                        to_block,
                        to_vreg,
                        HalfMoveKind::Source,
                    ),
                    alloc,
                });
                if self.annotations_enabled {
                    self.annotate(
                        self.cfginfo.block_exit[block.index()],
                        format!(
                            "blockparam-out: block{} to block{}: v{} to v{} in {}",
                            from_block.index(),
                            to_block.index(),
                            from_vreg.index(),
                            to_vreg.index(),
                            alloc
                        ),
                    );
                }
            }
            blockparam_out_idx += 1;
        }
        block = block.next();
    }
// Scan over blocks whose beginnings are covered by
// this range and for which the vreg is live at the
// start of the block. For each, for each predecessor,
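The `half_move_key` calls above produce keys that sort the Source and Dest halves of the same (edge, vreg) pair next to each other, so a later sort-and-scan can pair them up. Here is a sketch of one plausible bit-packing for such a key; the 21/21/21/1 field split is an illustrative assumption, not necessarily the crate's layout.

    enum HalfKind {
        Source = 0,
        Dest = 1,
    }

    // Pack (from-block, to-block, vreg, kind) into one sortable u64 so
    // the two halves of a given edge/vreg end up adjacent after sorting.
    fn half_key(from_block: u32, to_block: u32, vreg: u32, kind: HalfKind) -> u64 {
        debug_assert!(from_block < (1 << 21) && to_block < (1 << 21) && vreg < (1 << 21));
        ((from_block as u64) << 43)
            | ((to_block as u64) << 22)
            | ((vreg as u64) << 1)
            | (kind as u64)
    }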


@@ -1117,23 +1117,6 @@ pub trait Function {
&[]
}
/// Is the given vreg pinned to a preg? If so, every use of the
/// vreg is automatically assigned to the preg, and live-ranges of
/// the vreg allocate the preg exclusively (are not spilled
/// elsewhere). The user must take care not to keep so many pinned
/// vregs live at once that allocation becomes impossible; liverange
/// computation checks this invariant (that enough allocatable pregs
/// of every class remain to hold all Reg-constrained operands).
///
/// Pinned vregs are implicitly live-in to the function: one can
/// use a pinned vreg without first defining it, and the use takes
/// the value that the physical register (to which the vreg is
/// pinned) held at function entry.
fn is_pinned_vreg(&self, _: VReg) -> Option<PReg> {
None
}
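With `is_pinned_vreg` gone, tying a value to a specific physical register is expressed per use/def site through fixed-register operand constraints; the `Operand::reg_fixed_use`/`reg_fixed_def` constructors that appear in the checker code above do exactly this. A usage sketch, where the vregs and pregs passed in are assumed to come from the embedder's context:

    use regalloc2::{Operand, PReg, VReg};

    // Constrain one call's operands to ABI-fixed registers at this site
    // only, rather than pinning the vregs for their whole live ranges.
    fn call_operands(arg: VReg, ret: VReg, arg_preg: PReg, ret_preg: PReg) -> [Operand; 2] {
        [
            // Use of `arg`, fixed to `arg_preg` at this instruction.
            Operand::reg_fixed_use(arg, arg_preg),
            // Def of `ret`, fixed to `ret_preg` at this instruction.
            Operand::reg_fixed_def(ret, ret_preg),
        ]
    }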
// --------------
// Spills/reloads
// --------------