Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) used to represent the basic-block
subcomponent of an Ebb that is a predecessor of another Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated comments that refer to BasicBlock: eventually we
will remove BlockPredecessor and replace it with Block, which is a
basic block, so those comments will become correct.
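
For reference, a minimal self-contained sketch of the renamed pair. This is a simplification: the real `Block` and `Inst` are entity references defined in cranelift-codegen, and `BlockPredecessor` lives in the flowgraph module, as the diff below shows.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Block(u32); // formerly `Ebb`

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Inst(u32);

/// A predecessor of a block: the predecessor block itself, plus the branch
/// instruction inside it that targets the successor.
#[derive(Clone, Copy, Debug)]
pub struct BlockPredecessor {
    pub block: Block,
    pub inst: Inst,
}
```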

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
associated identifiers, would cause conflicts in a later commit, so
they are given more verbose names here.

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs

* Manually update comment for `Block`

The Wikipedia reference in `Block`'s doc comment required an update.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename a clif constant that contains 'ebb' as a substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters, so the new, longer
name was truncated and failed a filecheck test. An outdated comment
was also fixed.
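
A hedged illustration of the failure mode (the 16-character limit comes from the text above; the function names below are invented for the example):

```rust
// Hypothetical illustration: a printed test-function name is cut at 16
// characters, so a rename that lengthens it changes the output filecheck
// sees. ASCII names are assumed, so byte slicing is safe.
fn printed_name(name: &str) -> &str {
    &name[..name.len().min(16)]
}

fn main() {
    assert_eq!(printed_name("jump_table_ebb0"), "jump_table_ebb0"); // 15 chars, intact
    assert_eq!(printed_name("jump_table_block0"), "jump_table_block"); // 17 chars, truncated
}
```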
Author: Ryan Hunt
Date: 2020-02-07 10:46:47 -06:00 (committed by GitHub)
Parent: a136d1cb00
Commit: 832666c45e
370 changed files with 8090 additions and 7988 deletions


@@ -7,7 +7,7 @@ use alloc::vec::Vec;
use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList};
use crate::ir::{Block, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList};
use crate::isa::TargetIsa;
use crate::topo_order::TopoOrder;
@@ -43,12 +43,12 @@ struct Context<'a> {
impl<'a> Context<'a> {
fn run(&mut self) {
// Any ebb order will do.
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
// Any block order will do.
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
// Branches can only be at the last or second to last position in an extended basic
// block.
self.cur.goto_last_inst(ebb);
self.cur.goto_last_inst(block);
let terminator_inst = self.cur.current_inst().expect("terminator");
if let Some(inst) = self.cur.prev_inst() {
let opcode = self.cur.func.dfg[inst].opcode();
@@ -80,38 +80,38 @@ impl<'a> Context<'a> {
// If there are any parameters, split the edge.
if self.should_split_edge(target) {
// Create the block the branch will jump to.
let new_ebb = self.cur.func.dfg.make_ebb();
let new_block = self.cur.func.dfg.make_block();
// Insert the new block before the destination, such that it can fallthrough in the
// target block.
assert_ne!(Some(target), self.cur.layout().entry_block());
self.cur.layout_mut().insert_ebb(new_ebb, target);
self.cur.layout_mut().insert_block(new_block, target);
self.has_new_blocks = true;
// Extract the arguments of the branch instruction, split the Ebb parameters and the
// Extract the arguments of the branch instruction, split the Block parameters and the
// branch arguments
let num_fixed = opcode.constraints().num_fixed_value_arguments();
let dfg = &mut self.cur.func.dfg;
let old_args: Vec<_> = {
let args = dfg[branch].take_value_list().expect("ebb parameters");
let args = dfg[branch].take_value_list().expect("block parameters");
args.as_slice(&dfg.value_lists).iter().copied().collect()
};
let (branch_args, ebb_params) = old_args.split_at(num_fixed);
let (branch_args, block_params) = old_args.split_at(num_fixed);
// Replace the branch destination by the new Ebb created with no parameters, and restore
// the branch arguments, without the original Ebb parameters.
// Replace the branch destination by the new Block created with no parameters, and restore
// the branch arguments, without the original Block parameters.
{
let branch_args = ValueList::from_slice(branch_args, &mut dfg.value_lists);
let data = &mut dfg[branch];
*data.branch_destination_mut().expect("branch") = new_ebb;
*data.branch_destination_mut().expect("branch") = new_block;
data.put_value_list(branch_args);
}
let ok = self.cur.func.update_encoding(branch, self.cur.isa).is_ok();
debug_assert!(ok);
// Insert a jump to the original target with its arguments into the new block.
self.cur.goto_first_insertion_point(new_ebb);
self.cur.ins().jump(target, ebb_params);
self.cur.goto_first_insertion_point(new_block);
self.cur.ins().jump(target, block_params);
// Reset the cursor to point to the branch.
self.cur.goto_inst(branch);
@@ -122,7 +122,7 @@ impl<'a> Context<'a> {
let inst_data = &self.cur.func.dfg[inst];
let opcode = inst_data.opcode();
if opcode != Opcode::Jump && opcode != Opcode::Fallthrough {
// This opcode is ignored as it does not have any EBB parameters.
// This opcode is ignored as it does not have any block parameters.
if opcode != Opcode::IndirectJumpTableBr {
debug_assert!(!opcode.is_branch())
}
@@ -141,23 +141,23 @@ impl<'a> Context<'a> {
// If there are any parameters, split the edge.
if self.should_split_edge(*target) {
// Create the block the branch will jump to.
let new_ebb = self.cur.func.dfg.make_ebb();
let new_block = self.cur.func.dfg.make_block();
self.has_new_blocks = true;
// Split the current block before its terminator, and insert a new jump instruction to
// jump to it.
let jump = self.cur.ins().jump(new_ebb, &[]);
self.cur.insert_ebb(new_ebb);
let jump = self.cur.ins().jump(new_block, &[]);
self.cur.insert_block(new_block);
// Reset the cursor to point to new terminator of the old ebb.
// Reset the cursor to point to new terminator of the old block.
self.cur.goto_inst(jump);
}
}
/// Returns whether we should introduce a new branch.
fn should_split_edge(&self, target: Ebb) -> bool {
fn should_split_edge(&self, target: Block) -> bool {
// We should split the edge if the target has any parameters.
if !self.cur.func.dfg.ebb_params(target).is_empty() {
if !self.cur.func.dfg.block_params(target).is_empty() {
return true;
};
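
A self-contained toy of the transformation these hunks perform (hypothetical, simplified types; the real pass works through `EncCursor` and the DFG):

```rust
/// A branch whose operand list holds `num_fixed` fixed operands followed by
/// the destination block's arguments.
struct ToyBranch {
    target: u32,
    args: Vec<u32>,
}

/// Retarget `branch` at the freshly created `new_block` and return the old
/// target plus the block arguments, which the caller re-emits as
/// `jump old_target(block_params)` inside `new_block`.
fn split_edge(branch: &mut ToyBranch, num_fixed: usize, new_block: u32) -> (u32, Vec<u32>) {
    let block_params = branch.args.split_off(num_fixed); // fixed operands stay
    let old_target = std::mem::replace(&mut branch.target, new_block);
    (old_target, block_params)
}
```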


@@ -2,16 +2,16 @@
//!
//! Conventional SSA (CSSA) form is a subset of SSA form where any (transitively) phi-related
//! values do not interfere. We construct CSSA by building virtual registers that are as large as
//! possible and inserting copies where necessary such that all argument values passed to an EBB
//! parameter will belong to the same virtual register as the EBB parameter value itself.
//! possible and inserting copies where necessary such that all argument values passed to an block
//! parameter will belong to the same virtual register as the block parameter value itself.
use crate::cursor::{Cursor, EncCursor};
use crate::dbg::DisplayList;
use crate::dominator_tree::{DominatorTree, DominatorTreePreorder};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::fx::FxHashMap;
use crate::ir::{self, InstBuilder, ProgramOrder};
use crate::ir::{Ebb, ExpandedProgramPoint, Function, Inst, Value};
use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Value};
use crate::isa::{EncInfo, TargetIsa};
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liveness::Liveness;
@@ -40,8 +40,8 @@ use log::debug;
//
// Phase 1: Union-find.
//
// We use the union-find support in `VirtRegs` to build virtual registers such that EBB parameter
// values always belong to the same virtual register as their corresponding EBB arguments at the
// We use the union-find support in `VirtRegs` to build virtual registers such that block parameter
// values always belong to the same virtual register as their corresponding block arguments at the
// predecessor branches. Trivial interferences between parameter and argument value live ranges are
// detected and resolved before unioning congruence classes, but non-trivial interferences between
// values that end up in the same congruence class are possible.
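
A toy union-find with path compression, sketching the phase-1 machinery the comment describes; the real implementation lives in `VirtRegs` and unions `Value`s into congruence classes:

```rust
struct UnionFind {
    parent: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        Self { parent: (0..n).collect() }
    }

    /// Find the class representative, compressing the path as we go.
    fn find(&mut self, x: usize) -> usize {
        if self.parent[x] != x {
            let root = self.find(self.parent[x]);
            self.parent[x] = root;
        }
        self.parent[x]
    }

    /// Union the classes of, e.g., a block parameter and the corresponding
    /// argument at a predecessor branch.
    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra != rb {
            self.parent[ra] = rb;
        }
    }
}
```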
@@ -135,8 +135,8 @@ impl Coalescing {
};
// Run phase 1 (union-find) of the coalescing algorithm on the current function.
for &ebb in domtree.cfg_postorder() {
context.union_find_ebb(ebb);
for &block in domtree.cfg_postorder() {
context.union_find_block(block);
}
context.finish_union_find();
@@ -147,114 +147,114 @@ impl Coalescing {
/// Phase 1: Union-find.
///
/// The two entry points for phase 1 are `union_find_ebb()` and `finish_union_find`.
/// The two entry points for phase 1 are `union_find_block()` and `finish_union_find`.
impl<'a> Context<'a> {
/// Run the union-find algorithm on the parameter values on `ebb`.
/// Run the union-find algorithm on the parameter values on `block`.
///
/// This ensure that all EBB parameters will belong to the same virtual register as their
/// This ensure that all block parameters will belong to the same virtual register as their
/// corresponding arguments at all predecessor branches.
pub fn union_find_ebb(&mut self, ebb: Ebb) {
let num_params = self.func.dfg.num_ebb_params(ebb);
pub fn union_find_block(&mut self, block: Block) {
let num_params = self.func.dfg.num_block_params(block);
if num_params == 0 {
return;
}
self.isolate_conflicting_params(ebb, num_params);
self.isolate_conflicting_params(block, num_params);
for i in 0..num_params {
self.union_pred_args(ebb, i);
self.union_pred_args(block, i);
}
}
// Identify EBB parameter values that are live at one of the predecessor branches.
// Identify block parameter values that are live at one of the predecessor branches.
//
// Such a parameter value will conflict with any argument value at the predecessor branch, so
// it must be isolated by inserting a copy.
fn isolate_conflicting_params(&mut self, ebb: Ebb, num_params: usize) {
debug_assert_eq!(num_params, self.func.dfg.num_ebb_params(ebb));
// The only way a parameter value can interfere with a predecessor branch is if the EBB is
fn isolate_conflicting_params(&mut self, block: Block, num_params: usize) {
debug_assert_eq!(num_params, self.func.dfg.num_block_params(block));
// The only way a parameter value can interfere with a predecessor branch is if the block is
// dominating the predecessor branch. That is, we are looking for loop back-edges.
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
// The quick pre-order dominance check is accurate because the EBB parameter is defined
// at the top of the EBB before any branches.
if !self.preorder.dominates(ebb, pred_ebb) {
// The quick pre-order dominance check is accurate because the block parameter is defined
// at the top of the block before any branches.
if !self.preorder.dominates(block, pred_block) {
continue;
}
debug!(
" - checking {} params at back-edge {}: {}",
num_params,
pred_ebb,
pred_block,
self.func.dfg.display_inst(pred_inst, self.isa)
);
// Now `pred_inst` is known to be a back-edge, so it is possible for parameter values
// to be live at the use.
for i in 0..num_params {
let param = self.func.dfg.ebb_params(ebb)[i];
if self.liveness[param].reaches_use(pred_inst, pred_ebb, &self.func.layout) {
self.isolate_param(ebb, param);
let param = self.func.dfg.block_params(block)[i];
if self.liveness[param].reaches_use(pred_inst, pred_block, &self.func.layout) {
self.isolate_param(block, param);
}
}
}
}
// Union EBB parameter value `num` with the corresponding EBB arguments on the predecessor
// Union block parameter value `num` with the corresponding block arguments on the predecessor
// branches.
//
// Detect cases where the argument value is live-in to `ebb` so it conflicts with any EBB
// Detect cases where the argument value is live-in to `block` so it conflicts with any block
// parameter. Isolate the argument in those cases before unioning it with the parameter value.
fn union_pred_args(&mut self, ebb: Ebb, argnum: usize) {
let param = self.func.dfg.ebb_params(ebb)[argnum];
fn union_pred_args(&mut self, block: Block, argnum: usize) {
let param = self.func.dfg.block_params(block)[argnum];
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
let arg = self.func.dfg.inst_variable_args(pred_inst)[argnum];
// Never coalesce incoming function parameters on the stack. These parameters are
// pre-spilled, and the rest of the virtual register would be forced to spill to the
// `incoming_arg` stack slot too.
if let ir::ValueDef::Param(def_ebb, def_num) = self.func.dfg.value_def(arg) {
if Some(def_ebb) == self.func.layout.entry_block()
if let ir::ValueDef::Param(def_block, def_num) = self.func.dfg.value_def(arg) {
if Some(def_block) == self.func.layout.entry_block()
&& self.func.signature.params[def_num].location.is_stack()
{
debug!("-> isolating function stack parameter {}", arg);
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs.union(param, new_arg);
continue;
}
}
// Check for basic interference: If `arg` overlaps a value defined at the entry to
// `ebb`, it can never be used as an EBB argument.
// `block`, it can never be used as an block argument.
let interference = {
let lr = &self.liveness[arg];
// There are two ways the argument value can interfere with `ebb`:
// There are two ways the argument value can interfere with `block`:
//
// 1. It is defined in a dominating EBB and live-in to `ebb`.
// 2. If is itself a parameter value for `ebb`. This case should already have been
// 1. It is defined in a dominating block and live-in to `block`.
// 2. If is itself a parameter value for `block`. This case should already have been
// eliminated by `isolate_conflicting_params()`.
debug_assert!(
lr.def() != ebb.into(),
lr.def() != block.into(),
"{} parameter {} was missed by isolate_conflicting_params()",
ebb,
block,
arg
);
// The only other possibility is that `arg` is live-in to `ebb`.
lr.is_livein(ebb, &self.func.layout)
// The only other possibility is that `arg` is live-in to `block`.
lr.is_livein(block, &self.func.layout)
};
if interference {
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs.union(param, new_arg);
} else {
self.virtregs.union(param, arg);
@@ -262,31 +262,31 @@ impl<'a> Context<'a> {
}
}
// Isolate EBB parameter value `param` on `ebb`.
// Isolate block parameter value `param` on `block`.
//
// When `param=v10`:
//
// ebb1(v10: i32):
// block1(v10: i32):
// foo
//
// becomes:
//
// ebb1(v11: i32):
// block1(v11: i32):
// v10 = copy v11
// foo
//
// This function inserts the copy and updates the live ranges of the old and new parameter
// values. Returns the new parameter value.
fn isolate_param(&mut self, ebb: Ebb, param: Value) -> Value {
fn isolate_param(&mut self, block: Block, param: Value) -> Value {
debug_assert_eq!(
self.func.dfg.value_def(param).pp(),
ExpandedProgramPoint::Ebb(ebb)
ExpandedProgramPoint::Block(block)
);
let ty = self.func.dfg.value_type(param);
let new_val = self.func.dfg.replace_ebb_param(param, ty);
let new_val = self.func.dfg.replace_block_param(param, ty);
// Insert a copy instruction at the top of `ebb`.
let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(ebb);
// Insert a copy instruction at the top of `block`.
let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(block);
if let Some(inst) = pos.current_inst() {
pos.use_srcloc(inst);
}
@@ -297,7 +297,7 @@ impl<'a> Context<'a> {
debug!(
"-> inserted {}, following {}({}: {})",
pos.display_inst(inst),
ebb,
block,
new_val,
ty
);
@@ -311,27 +311,27 @@ impl<'a> Context<'a> {
.expect("Bad copy encoding")
.outs[0],
);
self.liveness.create_dead(new_val, ebb, affinity);
self.liveness.create_dead(new_val, block, affinity);
self.liveness
.extend_locally(new_val, ebb, inst, &pos.func.layout);
.extend_locally(new_val, block, inst, &pos.func.layout);
new_val
}
// Isolate the EBB argument `pred_val` from the predecessor `(pred_ebb, pred_inst)`.
// Isolate the block argument `pred_val` from the predecessor `(pred_block, pred_inst)`.
//
// It is assumed that `pred_inst` is a branch instruction in `pred_ebb` whose `argnum`'th EBB
// argument is `pred_val`. Since the argument value interferes with the corresponding EBB
// It is assumed that `pred_inst` is a branch instruction in `pred_block` whose `argnum`'th block
// argument is `pred_val`. Since the argument value interferes with the corresponding block
// parameter at the destination, a copy is used instead:
//
// brnz v1, ebb2(v10)
// brnz v1, block2(v10)
//
// Becomes:
//
// v11 = copy v10
// brnz v1, ebb2(v11)
// brnz v1, block2(v11)
//
// This way the interference with the EBB parameter is avoided.
// This way the interference with the block parameter is avoided.
//
// A live range for the new value is created while the live range for `pred_val` is left
// unaltered.
@@ -339,7 +339,7 @@ impl<'a> Context<'a> {
// The new argument value is returned.
fn isolate_arg(
&mut self,
pred_ebb: Ebb,
pred_block: Block,
pred_inst: Inst,
argnum: usize,
pred_val: Value,
@@ -360,14 +360,14 @@ impl<'a> Context<'a> {
);
self.liveness.create_dead(copy, inst, affinity);
self.liveness
.extend_locally(copy, pred_ebb, pred_inst, &pos.func.layout);
.extend_locally(copy, pred_block, pred_inst, &pos.func.layout);
pos.func.dfg.inst_variable_args_mut(pred_inst)[argnum] = copy;
debug!(
"-> inserted {}, before {}: {}",
pos.display_inst(inst),
pred_ebb,
pred_block,
pos.display_inst(pred_inst)
);
@@ -377,7 +377,7 @@ impl<'a> Context<'a> {
/// Finish the union-find part of the coalescing algorithm.
///
/// This builds the initial set of virtual registers as the transitive/reflexive/symmetric
/// closure of the relation formed by EBB parameter-argument pairs found by `union_find_ebb()`.
/// closure of the relation formed by block parameter-argument pairs found by `union_find_block()`.
fn finish_union_find(&mut self) {
self.virtregs.finish_union_find(None);
debug!("After union-find phase:{}", self.virtregs);
@@ -430,7 +430,7 @@ impl<'a> Context<'a> {
// Check for interference between `parent` and `value`. Since `parent` dominates
// `value`, we only have to check if it overlaps the definition.
if self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout) {
if self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout) {
// The two values are interfering, so they can't be in the same virtual register.
debug!("-> interference: {} overlaps def of {}", parent, value);
return false;
@@ -470,9 +470,9 @@ impl<'a> Context<'a> {
}
}
/// Merge EBB parameter value `param` with virtual registers at its predecessors.
/// Merge block parameter value `param` with virtual registers at its predecessors.
fn merge_param(&mut self, param: Value) {
let (ebb, argnum) = match self.func.dfg.value_def(param) {
let (block, argnum) = match self.func.dfg.value_def(param) {
ir::ValueDef::Param(e, n) => (e, n),
ir::ValueDef::Result(_, _) => panic!("Expected parameter"),
};
@@ -493,12 +493,12 @@ impl<'a> Context<'a> {
// not loop backedges.
debug_assert!(self.predecessors.is_empty());
debug_assert!(self.backedges.is_empty());
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
if self.preorder.dominates(ebb, pred_ebb) {
if self.preorder.dominates(block, pred_block) {
self.backedges.push(pred_inst);
} else {
self.predecessors.push(pred_inst);
@@ -522,8 +522,8 @@ impl<'a> Context<'a> {
}
// Can't merge because of interference. Insert a copy instead.
let pred_ebb = self.func.layout.pp_ebb(pred_inst);
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let pred_block = self.func.layout.pp_block(pred_inst);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs
.insert_single(param, new_arg, self.func, self.preorder);
}
@@ -616,12 +616,12 @@ impl<'a> Context<'a> {
// Check if the parent value interferes with the virtual copy.
let inst = node.def.unwrap_inst();
if node.set_id != parent.set_id
&& self.liveness[parent.value].reaches_use(inst, node.ebb, &self.func.layout)
&& self.liveness[parent.value].reaches_use(inst, node.block, &self.func.layout)
{
debug!(
" - interference: {} overlaps vcopy at {}:{}",
parent,
node.ebb,
node.block,
self.func.dfg.display_inst(inst, self.isa)
);
return false;
@@ -640,7 +640,7 @@ impl<'a> Context<'a> {
// Both node and parent are values, so check for interference.
debug_assert!(node.is_value() && parent.is_value());
if node.set_id != parent.set_id
&& self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout)
&& self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout)
{
// The two values are interfering.
debug!(" - interference: {} overlaps def of {}", parent, node.value);
@@ -663,7 +663,7 @@ impl<'a> Context<'a> {
///
/// The idea of a dominator forest was introduced on the Budimlic paper and the linear stack
/// representation in the Boissinot paper. Our version of the linear stack is slightly modified
/// because we have a pre-order of the dominator tree at the EBB granularity, not basic block
/// because we have a pre-order of the dominator tree at the block granularity, not basic block
/// granularity.
///
/// Values are pushed in dominator tree pre-order of their definitions, and for each value pushed,
@@ -673,7 +673,7 @@ struct DomForest {
// Stack representing the rightmost edge of the dominator forest so far, ending in the last
// element of `values`.
//
// At all times, the EBB of each element in the stack dominates the EBB of the next one.
// At all times, the block of each element in the stack dominates the block of the next one.
stack: Vec<Node>,
}
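
A toy of the linear-stack invariant described in the comment above; the `dominates` closure stands in for the pre-order dominance check on blocks:

```rust
/// Push `node` while maintaining the invariant that each stack entry's block
/// dominates the next one's: pop entries until the top dominates the newcomer.
fn push_node(stack: &mut Vec<u32>, node: u32, dominates: impl Fn(u32, u32) -> bool) {
    while let Some(&top) = stack.last() {
        if dominates(top, node) {
            break;
        }
        stack.pop();
    }
    stack.push(node);
}
```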
@@ -683,8 +683,8 @@ struct DomForest {
struct Node {
/// The program point where the live range is defined.
def: ExpandedProgramPoint,
/// EBB containing `def`.
ebb: Ebb,
/// block containing `def`.
block: Block,
/// Is this a virtual copy or a value?
is_vcopy: bool,
/// Set identifier.
@@ -698,10 +698,10 @@ impl Node {
/// Create a node representing `value`.
pub fn value(value: Value, set_id: u8, func: &Function) -> Self {
let def = func.dfg.value_def(value).pp();
let ebb = func.layout.pp_ebb(def);
let block = func.layout.pp_block(def);
Self {
def,
ebb,
block,
is_vcopy: false,
set_id,
value,
@@ -711,10 +711,10 @@ impl Node {
/// Create a node representing a virtual copy.
pub fn vcopy(branch: Inst, value: Value, set_id: u8, func: &Function) -> Self {
let def = branch.into();
let ebb = func.layout.pp_ebb(def);
let block = func.layout.pp_block(def);
Self {
def,
ebb,
block,
is_vcopy: true,
set_id,
value,
@@ -730,9 +730,9 @@ impl Node {
impl fmt::Display for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_vcopy {
write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.ebb)
write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.block)
} else {
write!(f, "{}:{}@{}", self.set_id, self.value, self.ebb)
write!(f, "{}:{}@{}", self.set_id, self.value, self.block)
}
}
}
@@ -760,16 +760,16 @@ impl DomForest {
preorder: &DominatorTreePreorder,
) -> Option<Node> {
// The stack contains the current sequence of dominating defs. Pop elements until we
// find one whose EBB dominates `node.ebb`.
// find one whose block dominates `node.block`.
while let Some(top) = self.stack.pop() {
if preorder.dominates(top.ebb, node.ebb) {
if preorder.dominates(top.block, node.block) {
// This is the right insertion spot for `node`.
self.stack.push(top);
self.stack.push(node);
// We know here that `top.ebb` dominates `node.ebb`, and thus `node.def`. This does
// We know here that `top.block` dominates `node.block`, and thus `node.def`. This does
// not necessarily mean that `top.def` dominates `node.def`, though. The `top.def`
// program point may be below the last branch in `top.ebb` that dominates
// program point may be below the last branch in `top.block` that dominates
// `node.def`.
//
// We do know, though, that if there is a nearest value dominating `node.def`, it
@@ -777,16 +777,16 @@ impl DomForest {
// dominates.
let mut last_dom = node.def;
for &n in self.stack.iter().rev().skip(1) {
// If the node is defined at the EBB header, it does in fact dominate
// If the node is defined at the block header, it does in fact dominate
// everything else pushed on the stack.
let def_inst = match n.def {
ExpandedProgramPoint::Ebb(_) => return Some(n),
ExpandedProgramPoint::Block(_) => return Some(n),
ExpandedProgramPoint::Inst(i) => i,
};
// We need to find the last program point in `n.ebb` to dominate `node.def`.
last_dom = match domtree.last_dominator(n.ebb, last_dom, &func.layout) {
None => n.ebb.into(),
// We need to find the last program point in `n.block` to dominate `node.def`.
last_dom = match domtree.last_dominator(n.block, last_dom, &func.layout) {
None => n.block.into(),
Some(inst) => {
if func.layout.cmp(def_inst, inst) != cmp::Ordering::Greater {
return Some(n);
@@ -816,18 +816,18 @@ impl DomForest {
/// When building a full virtual register at once, like phase 1 does with union-find, it is good
/// enough to check for interference between the values in the full virtual register like
/// `check_vreg()` does. However, in phase 2 we are doing pairwise merges of partial virtual
/// registers that don't represent the full transitive closure of the EBB argument-parameter
/// registers that don't represent the full transitive closure of the block argument-parameter
/// relation. This means that just checking for interference between values is inadequate.
///
/// Example:
///
/// v1 = iconst.i32 1
/// brnz v10, ebb1(v1)
/// brnz v10, block1(v1)
/// v2 = iconst.i32 2
/// brnz v11, ebb1(v2)
/// brnz v11, block1(v2)
/// return v1
///
/// ebb1(v3: i32):
/// block1(v3: i32):
/// v4 = iadd v3, v1
///
/// With just value interference checking, we could build the virtual register [v3, v1] since those
@@ -835,13 +835,13 @@ impl DomForest {
/// interfere. However, we can't resolve that interference either by inserting a copy:
///
/// v1 = iconst.i32 1
/// brnz v10, ebb1(v1)
/// brnz v10, block1(v1)
/// v2 = iconst.i32 2
/// v20 = copy v2 <-- new value
/// brnz v11, ebb1(v20)
/// brnz v11, block1(v20)
/// return v1
///
/// ebb1(v3: i32):
/// block1(v3: i32):
/// v4 = iadd v3, v1
///
/// The new value v20 still interferes with v1 because v1 is live across the "brnz v11" branch. We
@@ -851,32 +851,32 @@ impl DomForest {
/// instructions, then attempting to delete the copies. This is quite expensive because it involves
/// creating a large number of copies and value.
///
/// We'll detect this form of interference with *virtual copies*: Each EBB parameter value that
/// hasn't yet been fully merged with its EBB argument values is given a set of virtual copies at
/// We'll detect this form of interference with *virtual copies*: Each block parameter value that
/// hasn't yet been fully merged with its block argument values is given a set of virtual copies at
/// the predecessors. Any candidate value to be merged is checked for interference against both the
/// virtual register and the virtual copies.
///
/// In the general case, we're checking if two virtual registers can be merged, and both can
/// contain incomplete EBB parameter values with associated virtual copies.
/// contain incomplete block parameter values with associated virtual copies.
///
/// The `VirtualCopies` struct represents a set of incomplete parameters and their associated
/// virtual copies. Given two virtual registers, it can produce an ordered sequence of nodes
/// representing the virtual copies in both vregs.
struct VirtualCopies {
// Incomplete EBB parameters. These don't need to belong to the same virtual register.
// Incomplete block parameters. These don't need to belong to the same virtual register.
params: Vec<Value>,
// Set of `(branch, destination)` pairs. These are all the predecessor branches for the EBBs
// Set of `(branch, destination)` pairs. These are all the predecessor branches for the blocks
// whose parameters can be found in `params`.
//
// Ordered by dominator tree pre-order of the branch instructions.
branches: Vec<(Inst, Ebb)>,
branches: Vec<(Inst, Block)>,
// Filter for the currently active node iterator.
//
// An ebb => (set_id, num) entry means that branches to `ebb` are active in `set_id` with
// An block => (set_id, num) entry means that branches to `block` are active in `set_id` with
// branch argument number `num`.
filter: FxHashMap<Ebb, (u8, usize)>,
filter: FxHashMap<Block, (u8, usize)>,
}
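
The filter's role can be shown with a plain map (a toy mirroring the `lookup` helper that appears later in this file; the real filter is an `FxHashMap` keyed by `Block`):

```rust
use std::collections::HashMap;

/// Toy filter: branches to `block` are active in set `set_id` with branch
/// argument number `argnum`.
fn lookup(filter: &HashMap<u32, (u8, usize)>, block: u32) -> Option<(u8, usize)> {
    filter.get(&block).copied()
}
```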
impl VirtualCopies {
@@ -901,7 +901,7 @@ impl VirtualCopies {
///
/// The values are assumed to be in domtree pre-order.
///
/// This will extract the EBB parameter values and associate virtual copies all of them.
/// This will extract the block parameter values and associate virtual copies all of them.
pub fn initialize(
&mut self,
values: &[Value],
@@ -911,29 +911,29 @@ impl VirtualCopies {
) {
self.clear();
let mut last_ebb = None;
let mut last_block = None;
for &val in values {
if let ir::ValueDef::Param(ebb, _) = func.dfg.value_def(val) {
if let ir::ValueDef::Param(block, _) = func.dfg.value_def(val) {
self.params.push(val);
// We may have multiple parameters from the same EBB, but we only need to collect
// We may have multiple parameters from the same block, but we only need to collect
// predecessors once. Also verify the ordering of values.
if let Some(last) = last_ebb {
match preorder.pre_cmp_ebb(last, ebb) {
if let Some(last) = last_block {
match preorder.pre_cmp_block(last, block) {
cmp::Ordering::Less => {}
cmp::Ordering::Equal => continue,
cmp::Ordering::Greater => panic!("values in wrong order"),
}
}
// This EBB hasn't been seen before.
for BasicBlock {
// This block hasn't been seen before.
for BlockPredecessor {
inst: pred_inst, ..
} in cfg.pred_iter(ebb)
} in cfg.pred_iter(block)
{
self.branches.push((pred_inst, ebb));
self.branches.push((pred_inst, block));
}
last_ebb = Some(ebb);
last_block = Some(block);
}
}
@@ -953,7 +953,7 @@ impl VirtualCopies {
debug_assert_eq!(popped, Some(param));
// The domtree pre-order in `self.params` guarantees that all parameters defined at the
// same EBB will be adjacent. This means we can see when all parameters at an EBB have been
// same block will be adjacent. This means we can see when all parameters at an block have been
// merged.
//
// We don't care about the last parameter - when that is merged we are done.
@@ -961,16 +961,16 @@ impl VirtualCopies {
None => return,
Some(x) => *x,
};
let ebb = func.dfg.value_def(param).unwrap_ebb();
if func.dfg.value_def(last).unwrap_ebb() == ebb {
// We're not done with `ebb` parameters yet.
let block = func.dfg.value_def(param).unwrap_block();
if func.dfg.value_def(last).unwrap_block() == block {
// We're not done with `block` parameters yet.
return;
}
// Alright, we know there are no remaining `ebb` parameters in `self.params`. This means we
// can get rid of the `ebb` predecessors in `self.branches`. We don't have to, the
// Alright, we know there are no remaining `block` parameters in `self.params`. This means we
// can get rid of the `block` predecessors in `self.branches`. We don't have to, the
// `VCopyIter` will just skip them, but this reduces its workload.
self.branches.retain(|&(_, dest)| dest != ebb);
self.branches.retain(|&(_, dest)| dest != block);
}
/// Set a filter for the virtual copy nodes we're generating.
@@ -991,28 +991,28 @@ impl VirtualCopies {
// removed from the back once they are fully merged. This means we can stop looking for
// parameters once we're beyond the last one.
let last_param = *self.params.last().expect("No more parameters");
let limit = func.dfg.value_def(last_param).unwrap_ebb();
let limit = func.dfg.value_def(last_param).unwrap_block();
for (set_id, repr) in reprs.iter().enumerate() {
let set_id = set_id as u8;
for &value in virtregs.congruence_class(repr) {
if let ir::ValueDef::Param(ebb, num) = func.dfg.value_def(value) {
if preorder.pre_cmp_ebb(ebb, limit) == cmp::Ordering::Greater {
if let ir::ValueDef::Param(block, num) = func.dfg.value_def(value) {
if preorder.pre_cmp_block(block, limit) == cmp::Ordering::Greater {
// Stop once we're outside the bounds of `self.params`.
break;
}
self.filter.insert(ebb, (set_id, num));
self.filter.insert(block, (set_id, num));
}
}
}
}
/// Look up the set_id and argument number for `ebb` in the current filter.
/// Look up the set_id and argument number for `block` in the current filter.
///
/// Returns `None` if none of the currently active parameters are defined at `ebb`. Otherwise
/// returns `(set_id, argnum)` for an active parameter defined at `ebb`.
fn lookup(&self, ebb: Ebb) -> Option<(u8, usize)> {
self.filter.get(&ebb).cloned()
/// Returns `None` if none of the currently active parameters are defined at `block`. Otherwise
/// returns `(set_id, argnum)` for an active parameter defined at `block`.
fn lookup(&self, block: Block) -> Option<(u8, usize)> {
self.filter.get(&block).cloned()
}
/// Get an iterator of dom-forest nodes corresponding to the current filter.
@@ -1032,7 +1032,7 @@ impl VirtualCopies {
struct VCopyIter<'a> {
func: &'a Function,
vcopies: &'a VirtualCopies,
branches: slice::Iter<'a, (Inst, Ebb)>,
branches: slice::Iter<'a, (Inst, Block)>,
}
impl<'a> Iterator for VCopyIter<'a> {
@@ -1090,7 +1090,7 @@ where
(Some(a), Some(b)) => {
let layout = self.layout;
self.preorder
.pre_cmp_ebb(a.ebb, b.ebb)
.pre_cmp_block(a.block, b.block)
.then_with(|| layout.cmp(a.def, b.def))
}
(Some(_), None) => cmp::Ordering::Less,


@@ -24,8 +24,8 @@
//! a register.
//!
//! 5. The code must be in Conventional SSA form. Among other things, this means that values passed
//! as arguments when branching to an EBB must belong to the same virtual register as the
//! corresponding EBB argument value.
//! as arguments when branching to an block must belong to the same virtual register as the
//! corresponding block argument value.
//!
//! # Iteration order
//!
@@ -35,10 +35,10 @@
//! defined by the instruction and only consider the colors of other values that are live at the
//! instruction.
//!
//! The first time we see a branch to an EBB, the EBB's argument values are colored to match the
//! The first time we see a branch to an block, the block's argument values are colored to match the
//! registers currently holding branch argument values passed to the predecessor branch. By
//! visiting EBBs in a CFG topological order, we guarantee that at least one predecessor branch has
//! been visited before the destination EBB. Therefore, the EBB's arguments are already colored.
//! visiting blocks in a CFG topological order, we guarantee that at least one predecessor branch has
//! been visited before the destination block. Therefore, the block's arguments are already colored.
//!
//! The exception is the entry block whose arguments are colored from the ABI requirements.
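
A toy of that coloring rule (hypothetical types; the real pass tracks `ValueLoc`s and solves register constraints):

```rust
use std::collections::HashMap;

/// The first branch seen targeting a block fixes the registers of that
/// block's parameters; every later branch must already agree.
fn color_dest_params(
    colors: &mut HashMap<u32, u32>, // value -> register unit
    dest_params: &[u32],
    branch_arg_regs: &[u32],
) {
    for (&param, &reg) in dest_params.iter().zip(branch_arg_regs) {
        let prev = colors.insert(param, reg);
        debug_assert!(prev.map_or(true, |p| p == reg), "conflicting coloring");
    }
}
```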
@@ -46,7 +46,7 @@ use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::{ArgumentLoc, InstBuilder, ValueDef};
use crate::ir::{Ebb, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
use crate::ir::{Block, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
use crate::isa::{regs_overlap, RegClass, RegInfo, RegUnit};
use crate::isa::{ConstraintKind, EncInfo, OperandConstraint, RecipeConstraints, TargetIsa};
use crate::packed_option::PackedOption;
@@ -168,20 +168,20 @@ impl<'a> Context<'a> {
.resize(self.cur.func.dfg.num_values());
// Visit blocks in reverse post-order. We need to ensure that at least one predecessor has
// been visited before each EBB. That guarantees that the EBB arguments have been colored.
for &ebb in self.domtree.cfg_postorder().iter().rev() {
self.visit_ebb(ebb, tracker);
// been visited before each block. That guarantees that the block arguments have been colored.
for &block in self.domtree.cfg_postorder().iter().rev() {
self.visit_block(block, tracker);
}
}
/// Visit `ebb`, assuming that the immediate dominator has already been visited.
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Coloring {}:", ebb);
let mut regs = self.visit_ebb_header(ebb, tracker);
/// Visit `block`, assuming that the immediate dominator has already been visited.
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Coloring {}:", block);
let mut regs = self.visit_block_header(block, tracker);
tracker.drop_dead_params();
// Now go through the instructions in `ebb` and color the values they define.
self.cur.goto_top(ebb);
// Now go through the instructions in `block` and color the values they define.
self.cur.goto_top(block);
while let Some(inst) = self.cur.next_inst() {
self.cur.use_srcloc(inst);
let opcode = self.cur.func.dfg[inst].opcode();
@@ -204,7 +204,7 @@ impl<'a> Context<'a> {
tracker.drop_dead(inst);
// We are not able to insert any regmove for diversion or un-diversion after the first
// branch. Instead, we record the diversion to be restored at the entry of the next EBB,
// branch. Instead, we record the diversion to be restored at the entry of the next block,
// which should have a single predecessor.
if opcode.is_branch() {
// The next instruction is necessarily an unconditional branch.
@@ -221,15 +221,15 @@ impl<'a> Context<'a> {
"unexpected instruction {} after a conditional branch",
self.cur.display_inst(branch)
),
SingleDest(ebb, _) => ebb,
SingleDest(block, _) => block,
};
// We have a single branch with a single target, and an EBB with a single
// predecessor. Thus we can forward the diversion set to the next EBB.
// We have a single branch with a single target, and an block with a single
// predecessor. Thus we can forward the diversion set to the next block.
if self.cfg.pred_iter(target).count() == 1 {
// Transfer the diversion to the next EBB.
// Transfer the diversion to the next block.
self.divert
.save_for_ebb(&mut self.cur.func.entry_diversions, target);
.save_for_block(&mut self.cur.func.entry_diversions, target);
debug!(
"Set entry-diversion for {} to\n {}",
target,
@@ -253,13 +253,17 @@ impl<'a> Context<'a> {
}
}
/// Visit the `ebb` header.
/// Visit the `block` header.
///
/// Initialize the set of live registers and color the arguments to `ebb`.
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) -> AvailableRegs {
// Reposition the live value tracker and deal with the EBB arguments.
tracker.ebb_top(
ebb,
/// Initialize the set of live registers and color the arguments to `block`.
fn visit_block_header(
&mut self,
block: Block,
tracker: &mut LiveValueTracker,
) -> AvailableRegs {
// Reposition the live value tracker and deal with the block arguments.
tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
@@ -268,18 +272,18 @@ impl<'a> Context<'a> {
// Copy the content of the registered diversions to be reused at the
// entry of this basic block.
self.divert.at_ebb(&self.cur.func.entry_diversions, ebb);
self.divert.at_block(&self.cur.func.entry_diversions, block);
debug!(
"Start {} with entry-diversion set to\n {}",
ebb,
block,
self.divert.display(&self.reginfo)
);
if self.cur.func.layout.entry_block() == Some(ebb) {
if self.cur.func.layout.entry_block() == Some(block) {
// Parameters on the entry block have ABI constraints.
self.color_entry_params(tracker.live())
} else {
// The live-ins and parameters of a non-entry EBB have already been assigned a register.
// The live-ins and parameters of a non-entry block have already been assigned a register.
// Reconstruct the allocatable set.
self.livein_regs(tracker.live())
}
@@ -288,7 +292,7 @@ impl<'a> Context<'a> {
/// Initialize a set of allocatable registers from the values that are live-in to a block.
/// These values must already be colored when the dominating blocks were processed.
///
/// Also process the EBB arguments which were colored when the first predecessor branch was
/// Also process the block arguments which were colored when the first predecessor branch was
/// encountered.
fn livein_regs(&self, live: &[LiveValue]) -> AvailableRegs {
// Start from the registers that are actually usable. We don't want to include any reserved
@@ -428,7 +432,7 @@ impl<'a> Context<'a> {
regs.input.display(&self.reginfo),
);
// EBB whose arguments should be colored to match the current branch instruction's
// block whose arguments should be colored to match the current branch instruction's
// arguments.
let mut color_dest_args = None;
@@ -446,10 +450,10 @@ impl<'a> Context<'a> {
self.program_input_abi(inst, AbiParams::Returns);
} else if self.cur.func.dfg[inst].opcode().is_branch() {
// This is a branch, so we need to make sure that globally live values are in their
// global registers. For EBBs that take arguments, we also need to place the argument
// global registers. For blocks that take arguments, we also need to place the argument
// values in the expected registers.
if let Some(dest) = self.cur.func.dfg[inst].branch_destination() {
if self.program_ebb_arguments(inst, dest) {
if self.program_block_arguments(inst, dest) {
color_dest_args = Some(dest);
}
} else {
@@ -458,7 +462,7 @@ impl<'a> Context<'a> {
debug_assert_eq!(
self.cur.func.dfg.inst_variable_args(inst).len(),
0,
"Can't handle EBB arguments: {}",
"Can't handle block arguments: {}",
self.cur.display_inst(inst)
);
self.undivert_regs(|lr, _| !lr.is_local());
@@ -576,7 +580,7 @@ impl<'a> Context<'a> {
// If this is the first time we branch to `dest`, color its arguments to match the current
// register state.
if let Some(dest) = color_dest_args {
self.color_ebb_params(inst, dest);
self.color_block_params(inst, dest);
}
// Apply the solution to the defs.
@@ -727,7 +731,7 @@ impl<'a> Context<'a> {
// This code runs after calling `solver.inputs_done()` so we must identify
// the new variable as killed or live-through.
let layout = &self.cur.func.layout;
if self.liveness[arg_val].killed_at(inst, layout.pp_ebb(inst), layout) {
if self.liveness[arg_val].killed_at(inst, layout.pp_block(inst), layout) {
self.solver
.add_killed_var(arg_val, constraint.regclass, cur_reg);
} else {
@@ -747,12 +751,12 @@ impl<'a> Context<'a> {
///
/// 1. Any values that are live-in to `dest` must be un-diverted so they live in their globally
/// assigned register.
/// 2. If the `dest` EBB takes arguments, reassign the branch argument values to the matching
/// 2. If the `dest` block takes arguments, reassign the branch argument values to the matching
/// registers.
///
/// Returns true if this is the first time a branch to `dest` is seen, so the `dest` argument
/// values should be colored after `shuffle_inputs`.
fn program_ebb_arguments(&mut self, inst: Inst, dest: Ebb) -> bool {
fn program_block_arguments(&mut self, inst: Inst, dest: Block) -> bool {
// Find diverted registers that are live-in to `dest` and reassign them to their global
// home.
//
@@ -760,9 +764,9 @@ impl<'a> Context<'a> {
// arguments, so they can't always be un-diverted.
self.undivert_regs(|lr, layout| lr.is_livein(dest, layout));
// Now handle the EBB arguments.
// Now handle the block arguments.
let br_args = self.cur.func.dfg.inst_variable_args(inst);
let dest_args = self.cur.func.dfg.ebb_params(dest);
let dest_args = self.cur.func.dfg.block_params(dest);
debug_assert_eq!(br_args.len(), dest_args.len());
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
// The first time we encounter a branch to `dest`, we get to pick the location. The
@@ -771,7 +775,7 @@ impl<'a> Context<'a> {
ValueLoc::Unassigned => {
// This is the first branch to `dest`, so we should color `dest_arg` instead of
// `br_arg`. However, we don't know where `br_arg` will end up until
// after `shuffle_inputs`. See `color_ebb_params` below.
// after `shuffle_inputs`. See `color_block_params` below.
//
// It is possible for `dest_arg` to have no affinity, and then it should simply
// be ignored.
@@ -804,10 +808,10 @@ impl<'a> Context<'a> {
/// Knowing that we've never seen a branch to `dest` before, color its parameters to match our
/// register state.
///
/// This function is only called when `program_ebb_arguments()` returned `true`.
fn color_ebb_params(&mut self, inst: Inst, dest: Ebb) {
/// This function is only called when `program_block_arguments()` returned `true`.
fn color_block_params(&mut self, inst: Inst, dest: Block) {
let br_args = self.cur.func.dfg.inst_variable_args(inst);
let dest_args = self.cur.func.dfg.ebb_params(dest);
let dest_args = self.cur.func.dfg.block_params(dest);
debug_assert_eq!(br_args.len(), dest_args.len());
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
match self.cur.func.locations[dest_arg] {
@@ -818,7 +822,7 @@ impl<'a> Context<'a> {
}
}
ValueLoc::Reg(_) => panic!("{} arg {} already colored", dest, dest_arg),
// Spilled value consistency is verified by `program_ebb_arguments()` above.
// Spilled value consistency is verified by `program_block_arguments()` above.
ValueLoc::Stack(_) => {}
}
}
@@ -1082,7 +1086,7 @@ impl<'a> Context<'a> {
/// Determine if `value` is live on a CFG edge from the current instruction.
///
/// This means that the current instruction is a branch and `value` is live in to one of the
/// branch destinations. Branch arguments and EBB parameters are not considered live on the
/// branch destinations. Branch arguments and block parameters are not considered live on the
/// edge.
fn is_live_on_outgoing_edge(&self, value: Value) -> bool {
use crate::ir::instructions::BranchInfo::*;
@@ -1091,17 +1095,17 @@ impl<'a> Context<'a> {
let layout = &self.cur.func.layout;
match self.cur.func.dfg.analyze_branch(inst) {
NotABranch => false,
SingleDest(ebb, _) => {
SingleDest(block, _) => {
let lr = &self.liveness[value];
lr.is_livein(ebb, layout)
lr.is_livein(block, layout)
}
Table(jt, ebb) => {
Table(jt, block) => {
let lr = &self.liveness[value];
!lr.is_local()
&& (ebb.map_or(false, |ebb| lr.is_livein(ebb, layout))
&& (block.map_or(false, |block| lr.is_livein(block, layout))
|| self.cur.func.jump_tables[jt]
.iter()
.any(|ebb| lr.is_livein(*ebb, layout)))
.any(|block| lr.is_livein(*block, layout)))
}
}
}
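
In toy form, the rule reduces to the following, with `is_livein` standing in for `LiveRange::is_livein`:

```rust
/// A value is live on an outgoing edge iff it is live-in to at least one
/// branch destination; branch arguments and block parameters don't count.
fn live_on_outgoing_edge(dests: &[u32], is_livein: impl Fn(u32) -> bool) -> bool {
    dests.iter().any(|&block| is_livein(block))
}
```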
@@ -1232,7 +1236,7 @@ impl<'a> Context<'a> {
self.liveness.create_dead(local, inst, lv.affinity);
self.liveness.extend_locally(
local,
self.cur.func.layout.pp_ebb(inst),
self.cur.func.layout.pp_block(inst),
copy,
&self.cur.func.layout,
);


@@ -4,12 +4,12 @@
//! Sometimes, it is necessary to move register values to a different register in order to satisfy
//! instruction constraints.
//!
//! These register diversions are local to an EBB. No values can be diverted when entering a new
//! EBB.
//! These register diversions are local to an block. No values can be diverted when entering a new
//! block.
use crate::fx::FxHashMap;
use crate::hash_map::{Entry, Iter};
use crate::ir::{Ebb, StackSlot, Value, ValueLoc, ValueLocations};
use crate::ir::{Block, StackSlot, Value, ValueLoc, ValueLocations};
use crate::ir::{InstructionData, Opcode};
use crate::isa::{RegInfo, RegUnit};
use core::fmt;
@@ -38,22 +38,22 @@ impl Diversion {
}
}
/// Keep track of diversions in an EBB.
/// Keep track of diversions in an block.
#[derive(Clone)]
pub struct RegDiversions {
current: FxHashMap<Value, Diversion>,
}
/// Keep track of diversions at the entry of EBB.
/// Keep track of diversions at the entry of block.
#[derive(Clone)]
struct EntryRegDiversionsValue {
key: Ebb,
key: Block,
divert: RegDiversions,
}
/// Map EBB to their matching RegDiversions at basic blocks entry.
/// Map block to their matching RegDiversions at basic blocks entry.
pub struct EntryRegDiversions {
map: SparseMap<Ebb, EntryRegDiversionsValue>,
map: SparseMap<Block, EntryRegDiversionsValue>,
}
impl RegDiversions {
@@ -178,22 +178,22 @@ impl RegDiversions {
}
/// Resets the state of the current diversions to the recorded diversions at the entry of the
/// given `ebb`. The recoded diversions is available after coloring on `func.entry_diversions`
/// given `block`. The recoded diversions is available after coloring on `func.entry_diversions`
/// field.
pub fn at_ebb(&mut self, entry_diversions: &EntryRegDiversions, ebb: Ebb) {
pub fn at_block(&mut self, entry_diversions: &EntryRegDiversions, block: Block) {
self.clear();
if let Some(entry_divert) = entry_diversions.map.get(ebb) {
if let Some(entry_divert) = entry_diversions.map.get(block) {
let iter = entry_divert.divert.current.iter();
self.current.extend(iter);
}
}
/// Copy the current state of the diversions, and save it for the entry of the `ebb` given as
/// Copy the current state of the diversions, and save it for the entry of the `block` given as
/// argument.
///
/// Note: This function can only be called once on an `ebb` with a given `entry_diversions`
/// Note: This function can only be called once on a `Block` with a given `entry_diversions`
/// argument, otherwise it would panic.
pub fn save_for_ebb(&mut self, entry_diversions: &mut EntryRegDiversions, target: Ebb) {
pub fn save_for_block(&mut self, entry_diversions: &mut EntryRegDiversions, target: Block) {
// No need to save anything if there is no diversions to be recorded.
if self.is_empty() {
return;
@@ -208,9 +208,9 @@ impl RegDiversions {
});
}
/// Check that the recorded entry for a given `ebb` matches what is recorded in the
/// Check that the recorded entry for a given `block` matches what is recorded in the
/// `entry_diversions`.
pub fn check_ebb_entry(&self, entry_diversions: &EntryRegDiversions, target: Ebb) -> bool {
pub fn check_block_entry(&self, entry_diversions: &EntryRegDiversions, target: Block) -> bool {
let entry_divert = match entry_diversions.map.get(target) {
Some(entry_divert) => entry_divert,
None => return self.is_empty(),
@@ -235,7 +235,7 @@ impl RegDiversions {
}
impl EntryRegDiversions {
/// Create a new empty entry diversion, to associate diversions to each EBB entry.
/// Create a new empty entry diversion, to associate diversions to each block entry.
pub fn new() -> Self {
Self {
map: SparseMap::new(),
@@ -259,9 +259,9 @@ impl Clone for EntryRegDiversions {
}
/// Implement `SparseMapValue`, as required to make use of a `SparseMap` for mapping the entry
/// diversions for each EBB.
impl SparseMapValue<Ebb> for EntryRegDiversionsValue {
fn key(&self) -> Ebb {
/// diversions for each block.
impl SparseMapValue<Block> for EntryRegDiversionsValue {
fn key(&self) -> Block {
self.key
}
}
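
A self-contained toy of the save/restore flow these hunks rename (hypothetical, simplified types; the real code uses a `SparseMap` keyed by `Block`):

```rust
use std::collections::HashMap;

type Block = u32;

/// Toy diversion set: value -> currently diverted register.
#[derive(Clone, Default)]
struct Diversions(HashMap<u32, u32>);

#[derive(Default)]
struct EntryDiversions(HashMap<Block, Diversions>);

impl Diversions {
    /// Record the current diversions for the entry of `target`; callable at
    /// most once per block, matching `save_for_block` above.
    fn save_for_block(&self, entry: &mut EntryDiversions, target: Block) {
        if self.0.is_empty() {
            return;
        }
        let prev = entry.0.insert(target, self.clone());
        assert!(prev.is_none(), "entry diversions recorded twice for a block");
    }

    /// Reset to the diversions recorded at the entry of `block`, matching
    /// `at_block` above.
    fn at_block(&mut self, entry: &EntryDiversions, block: Block) {
        self.0.clear();
        if let Some(saved) = entry.0.get(&block) {
            self.0.extend(saved.0.iter().map(|(&v, &r)| (v, r)));
        }
    }
}
```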


@@ -1,13 +1,13 @@
//! Track which values are live in an EBB with instruction granularity.
//! Track which values are live in an block with instruction granularity.
//!
//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in an EBB.
//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in an block.
//! The sets of live values are computed on the fly as the tracker is moved from instruction to
//! instruction, starting at the EBB header.
//! instruction, starting at the block header.
use crate::dominator_tree::DominatorTree;
use crate::entity::{EntityList, ListPool};
use crate::fx::FxHashMap;
use crate::ir::{DataFlowGraph, Ebb, ExpandedProgramPoint, Inst, Layout, Value};
use crate::ir::{Block, DataFlowGraph, ExpandedProgramPoint, Inst, Layout, Value};
use crate::partition_slice::partition_slice;
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liveness::Liveness;
@@ -16,13 +16,13 @@ use alloc::vec::Vec;
type ValueList = EntityList<Value>;
/// Compute and track live values throughout an EBB.
/// Compute and track live values throughout an block.
pub struct LiveValueTracker {
/// The set of values that are live at the current program point.
live: LiveValueVec,
/// Saved set of live values for every jump and branch that can potentially be an immediate
/// dominator of an EBB.
/// dominator of an block.
///
/// This is the set of values that are live *before* the branch.
idom_sets: FxHashMap<Inst, ValueList>,
@@ -37,7 +37,7 @@ pub struct LiveValue {
/// The live value.
pub value: Value,
/// The local ending point of the live range in the current EBB, as returned by
/// The local ending point of the live range in the current block, as returned by
/// `LiveRange::def_local_end()` or `LiveRange::livein_local_end()`.
pub endpoint: Inst,
@@ -47,7 +47,7 @@ pub struct LiveValue {
/// almost all users of `LiveValue` need to look at it.
pub affinity: Affinity,
/// The live range for this value never leaves its EBB.
/// The live range for this value never leaves its block.
pub is_local: bool,
/// This value is dead - the live range ends immediately.
@@ -155,75 +155,75 @@ impl LiveValueTracker {
&mut self.live.values
}
/// Move the current position to the top of `ebb`.
/// Move the current position to the top of `block`.
///
/// This depends on the stored live value set at `ebb`'s immediate dominator, so that must have
/// This depends on the stored live value set at `block`'s immediate dominator, so that must have
/// been visited first.
///
/// Returns `(liveins, args)` as a pair of slices. The first slice is the set of live-in values
/// from the immediate dominator. The second slice is the set of `ebb` parameters.
/// from the immediate dominator. The second slice is the set of `block` parameters.
///
/// Dead parameters with no uses are included in `args`. Call `drop_dead_args()` to remove them.
pub fn ebb_top(
pub fn block_top(
&mut self,
ebb: Ebb,
block: Block,
dfg: &DataFlowGraph,
liveness: &Liveness,
layout: &Layout,
domtree: &DominatorTree,
) -> (&[LiveValue], &[LiveValue]) {
// Start over, compute the set of live values at the top of the EBB from two sources:
// Start over, compute the set of live values at the top of the block from two sources:
//
// 1. Values that were live before `ebb`'s immediate dominator, filtered for those that are
// 1. Values that were live before `block`'s immediate dominator, filtered for those that are
// actually live-in.
// 2. Arguments to `ebb` that are not dead.
// 2. Arguments to `block` that are not dead.
//
self.live.clear();
// Compute the live-in values. Start by filtering the set of values that were live before
// the immediate dominator. Just use the empty set if there's no immediate dominator (i.e.,
// the entry block or an unreachable block).
if let Some(idom) = domtree.idom(ebb) {
if let Some(idom) = domtree.idom(block) {
// If the immediate dominator exits, we must have a stored list for it. This is a
// requirement to the order EBBs are visited: All dominators must have been processed
// before the current EBB.
// requirement to the order blocks are visited: All dominators must have been processed
// before the current block.
let idom_live_list = self
.idom_sets
.get(&idom)
.expect("No stored live set for dominator");
// Get just the values that are live-in to `ebb`.
// Get just the values that are live-in to `block`.
for &value in idom_live_list.as_slice(&self.idom_pool) {
let lr = liveness
.get(value)
.expect("Immediate dominator value has no live range");
// Check if this value is live-in here.
if let Some(endpoint) = lr.livein_local_end(ebb, layout) {
if let Some(endpoint) = lr.livein_local_end(block, layout) {
self.live.push(value, endpoint, lr);
}
}
}
// Now add all the live parameters to `ebb`.
// Now add all the live parameters to `block`.
let first_arg = self.live.values.len();
for &value in dfg.ebb_params(ebb) {
for &value in dfg.block_params(block) {
let lr = &liveness[value];
debug_assert_eq!(lr.def(), ebb.into());
debug_assert_eq!(lr.def(), block.into());
match lr.def_local_end().into() {
ExpandedProgramPoint::Inst(endpoint) => {
self.live.push(value, endpoint, lr);
}
ExpandedProgramPoint::Ebb(local_ebb) => {
// This is a dead EBB parameter which is not even live into the first
// instruction in the EBB.
ExpandedProgramPoint::Block(local_block) => {
// This is a dead block parameter which is not even live into the first
// instruction in the block.
debug_assert_eq!(
local_ebb, ebb,
"EBB parameter live range ends at wrong EBB header"
local_block, block,
"block parameter live range ends at wrong block header"
);
// Give this value a fake endpoint that is the first instruction in the EBB.
// Give this value a fake endpoint that is the first instruction in the block.
// We expect it to be removed by calling `drop_dead_params()`.
self.live
.push(value, layout.first_inst(ebb).expect("Empty EBB"), lr);
.push(value, layout.first_inst(block).expect("Empty block"), lr);
}
}
}
@@ -274,8 +274,8 @@ impl LiveValueTracker {
ExpandedProgramPoint::Inst(endpoint) => {
self.live.push(value, endpoint, lr);
}
ExpandedProgramPoint::Ebb(ebb) => {
panic!("Instruction result live range can't end at {}", ebb);
ExpandedProgramPoint::Block(block) => {
panic!("Instruction result live range can't end at {}", block);
}
}
}
@@ -310,7 +310,7 @@ impl LiveValueTracker {
/// Drop any values that are marked as `is_dead`.
///
/// Use this after calling `ebb_top` to clean out dead EBB parameters.
/// Use this after calling `block_top` to clean out dead block parameters.
pub fn drop_dead_params(&mut self) {
self.live.remove_dead_values();
}

View File

@@ -7,18 +7,18 @@
//! # Liveness consumers
//!
//! The primary consumer of the liveness analysis is the SSA coloring pass which goes through each
//! EBB and assigns a register to the defined values. This algorithm needs to maintain a set of the
//! currently live values as it is iterating down the instructions in the EBB. It asks the
//! block and assigns a register to the defined values. This algorithm needs to maintain a set of the
//! currently live values as it is iterating down the instructions in the block. It asks the
//! following questions:
//!
//! - What is the set of live values at the entry to the EBB?
//! - When moving past a use of a value, is that value still alive in the EBB, or was that the last
//! - What is the set of live values at the entry to the block?
//! - When moving past a use of a value, is that value still alive in the block, or was that the last
//! use?
//! - When moving past a branch, which of the live values are still live below the branch?
//!
//! The set of `LiveRange` instances can answer these questions through their `def_local_end` and
//! `livein_local_end` queries. The coloring algorithm visits EBBs in a topological order of the
//! dominator tree, so it can compute the set of live values at the beginning of an EBB by starting
//! `livein_local_end` queries. The coloring algorithm visits blocks in a topological order of the
//! dominator tree, so it can compute the set of live values at the beginning of a block by starting
//! from the set of live values at the dominating branch instruction and filtering it with
//! `livein_local_end`. These sets do not need to be stored in the liveness analysis.
//!
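//! As a minimal sketch (in terms of the `LiveRange` queries defined in `liverange.rs`;
//! the helper name is hypothetical), the "was that the last use?" question becomes:
//!
//!     // True when `user` is the endpoint of the value's local interval in `block`,
//!     // i.e. no later instruction in this block still needs the value.
//!     fn is_last_use(lr: &LiveRange, user: Inst, block: Block, layout: &Layout) -> bool {
//!         lr.killed_at(user, block, layout)
//!     }
//!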
@@ -43,7 +43,7 @@
//!
//! - Quadratic memory use. We need a bit per variable per basic block in the function.
//! - Dense representation of sparse data. In practice, the majority of SSA values never leave
//! their basic block, and those that do span basic blocks rarely span a large number of basic
//! blocks. This makes the data stored in the bitvectors quite sparse.
//! - Traditionally, the data-flow equations were solved for real program *variables* which does
//! not include temporaries used in evaluating expressions. We have an SSA form program which
@@ -141,10 +141,10 @@
//! - The first time a value is encountered, its live range is constructed as a dead live range
//! containing only the defining program point.
//! - The local interval of the value's live range is extended so it reaches the use. This may
//! require creating a new live-in local interval for the EBB.
//! - If the live range became live-in to the EBB, add the EBB to a work-list.
//! - While the work-list is non-empty pop a live-in EBB and repeat the two steps above, using each
//! of the live-in EBB's CFG predecessor instructions as a 'use'.
//! require creating a new live-in local interval for the block.
//! - If the live range became live-in to the block, add the block to a work-list.
//! - While the work-list is non-empty, pop a live-in block and repeat the two steps above, using each
//! of the live-in block's CFG predecessor instructions as a 'use'.
//!
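//! For example, suppose a value is defined in `block1` and its only use is in `block3`,
//! with edges block1 -> block2 -> block3 (names hypothetical). Visiting the use extends
//! the live range locally in `block3` and pushes `block3`; popping it extends the range
//! to `block2`'s branch, pushing `block2`; popping `block2` extends the range to
//! `block1`'s branch, which falls inside the defining interval, so nothing more is pushed
//! and the traversal stops.
//!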
//! The effect of this algorithm is to extend the live range of each value to reach uses as they are
//! visited. No data about each value beyond the live range is needed between visiting uses, so
@@ -176,9 +176,9 @@
//! There is some room for improvement.
use crate::entity::SparseMap;
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::dfg::ValueDef;
use crate::ir::{Ebb, Function, Inst, Layout, ProgramPoint, Value};
use crate::ir::{Block, Function, Inst, Layout, ProgramPoint, Value};
use crate::isa::{EncInfo, OperandConstraint, TargetIsa};
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liverange::LiveRange;
@@ -223,14 +223,14 @@ fn get_or_create<'a>(
})
.unwrap_or_default();
}
ValueDef::Param(ebb, num) => {
def = ebb.into();
if func.layout.entry_block() == Some(ebb) {
ValueDef::Param(block, num) => {
def = block.into();
if func.layout.entry_block() == Some(block) {
// The affinity for entry block parameters can be inferred from the function
// signature.
affinity = Affinity::abi(&func.signature.params[num], isa);
} else {
// Give normal EBB parameters a register affinity matching their type.
// Give normal block parameters a register affinity matching their type.
let rc = isa.regclass_for_abi_type(func.dfg.value_type(value));
affinity = Affinity::Reg(rc.into());
}
@@ -241,43 +241,43 @@ fn get_or_create<'a>(
lrset.get_mut(value).unwrap()
}
/// Extend the live range for `value` so it reaches `to` which must live in `ebb`.
/// Extend the live range for `value` so it reaches `to` which must live in `block`.
fn extend_to_use(
lr: &mut LiveRange,
ebb: Ebb,
block: Block,
to: Inst,
worklist: &mut Vec<Ebb>,
worklist: &mut Vec<Block>,
func: &Function,
cfg: &ControlFlowGraph,
) {
// This is our scratch working space, and we'll leave it empty when we return.
debug_assert!(worklist.is_empty());
// Extend the range locally in `ebb`.
// Extend the range locally in `block`.
// If there already was a live interval in that block, we're done.
if lr.extend_in_ebb(ebb, to, &func.layout) {
worklist.push(ebb);
if lr.extend_in_block(block, to, &func.layout) {
worklist.push(block);
}
// The work list contains those EBBs where we have learned that the value needs to be
// The work list contains those blocks where we have learned that the value needs to be
// live-in.
//
// This algorithm becomes a depth-first traversal up the CFG, enumerating all paths through the
// CFG from the existing live range to `ebb`.
// CFG from the existing live range to `block`.
//
// Extend the live range as we go. The live range itself also serves as a visited set since
// `extend_in_ebb` will never return true twice for the same EBB.
// `extend_in_block` will never return true twice for the same block.
//
while let Some(livein) = worklist.pop() {
// We've learned that the value needs to be live-in to the `livein` EBB.
// We've learned that the value needs to be live-in to the `livein` block.
// Make sure it is also live at all predecessor branches to `livein`.
for BasicBlock {
ebb: pred,
for BlockPredecessor {
block: pred,
inst: branch,
} in cfg.pred_iter(livein)
{
if lr.extend_in_ebb(pred, branch, &func.layout) {
// This predecessor EBB also became live-in. We need to process it later.
if lr.extend_in_block(pred, branch, &func.layout) {
// This predecessor block also became live-in. We need to process it later.
worklist.push(pred);
}
}
@@ -294,7 +294,7 @@ pub struct Liveness {
/// Working space for the `extend_to_use` algorithm.
/// This vector is always empty, except for inside that function.
/// It lives here to avoid repeated allocation of scratch memory.
worklist: Vec<Ebb>,
worklist: Vec<Block>,
}
impl Liveness {
@@ -342,7 +342,7 @@ impl Liveness {
/// Move the definition of `value` to `def`.
///
/// The old and new def points must be in the same EBB, and before the end of the live range.
/// The old and new def points must be in the same block, and before the end of the live range.
pub fn move_def_locally<PP>(&mut self, value: Value, def: PP)
where
PP: Into<ProgramPoint>,
@@ -353,20 +353,20 @@ impl Liveness {
/// Locally extend the live range for `value` to reach `user`.
///
/// It is assumed the `value` is already live before `user` in `ebb`.
/// It is assumed the `value` is already live before `user` in `block`.
///
/// Returns a mutable reference to the value's affinity in case that also needs to be updated.
pub fn extend_locally(
&mut self,
value: Value,
ebb: Ebb,
block: Block,
user: Inst,
layout: &Layout,
) -> &mut Affinity {
debug_assert_eq!(Some(ebb), layout.inst_ebb(user));
debug_assert_eq!(Some(block), layout.inst_block(user));
let lr = self.ranges.get_mut(value).expect("Value has no live range");
let livein = lr.extend_in_ebb(ebb, user, layout);
debug_assert!(!livein, "{} should already be live in {}", value, ebb);
let livein = lr.extend_in_block(block, user, layout);
debug_assert!(!livein, "{} should already be live in {}", value, block);
&mut lr.affinity
}
@@ -389,15 +389,15 @@ impl Liveness {
// The liveness computation needs to visit all uses, but the order doesn't matter.
// TODO: Perhaps this traversal of the function could be combined with a dead code
// elimination pass if we visit a post-order of the dominator tree?
for ebb in func.layout.ebbs() {
// Make sure we have created live ranges for dead EBB parameters.
for block in func.layout.blocks() {
// Make sure we have created live ranges for dead block parameters.
// TODO: If these parameters are really dead, we could remove them, except for the
// entry block which must match the function signature.
for &arg in func.dfg.ebb_params(ebb) {
for &arg in func.dfg.block_params(block) {
get_or_create(&mut self.ranges, arg, isa, func, &encinfo);
}
for inst in func.layout.ebb_insts(ebb) {
for inst in func.layout.block_insts(block) {
// Eliminate all value aliases, they would confuse the register allocator.
func.dfg.resolve_aliases_in_arguments(inst);
@@ -419,11 +419,11 @@ impl Liveness {
let lr = get_or_create(&mut self.ranges, arg, isa, func, &encinfo);
// Extend the live range to reach this use.
extend_to_use(lr, ebb, inst, &mut self.worklist, func, cfg);
extend_to_use(lr, block, inst, &mut self.worklist, func, cfg);
// Apply operand constraint, ignoring any variable arguments after the fixed
// operands described by `operand_constraints`. Variable arguments are either
// EBB arguments or call/return ABI arguments.
// block arguments or call/return ABI arguments.
if let Some(constraint) = operand_constraints.next() {
lr.affinity.merge(constraint, &reginfo);
}

View File

@@ -6,29 +6,29 @@
//!
//! # Local Live Ranges
//!
//! Inside a single extended basic block, the live range of a value is always an interval between
//! two program points (if the value is live in the EBB at all). The starting point is either:
//! Inside a single basic block, the live range of a value is always an interval between
//! two program points (if the value is live in the block at all). The starting point is either:
//!
//! 1. The instruction that defines the value, or
//! 2. The EBB header, because the value is an argument to the EBB, or
//! 3. The EBB header, because the value is defined in another EBB and live-in to this one.
//! 2. The block header, because the value is an argument to the block, or
//! 3. The block header, because the value is defined in another block and live-in to this one.
//!
//! The ending point of the local live range is the last of the following program points in the
//! EBB:
//! block:
//!
//! 1. The last use in the EBB, where a *use* is an instruction that has the value as an argument.
//! 2. The last branch or jump instruction in the EBB that can reach a use.
//! 1. The last use in the block, where a *use* is an instruction that has the value as an argument.
//! 2. The last branch or jump instruction in the block that can reach a use.
//! 3. If the value has no uses anywhere (a *dead value*), the program point that defines it.
//!
//! Note that 2. includes loop back-edges to the same EBB. In general, if a value is defined
//! Note that 2. includes loop back-edges to the same block. In general, if a value is defined
//! outside a loop and used inside the loop, it will be live in the entire loop.
//!
//! # Global Live Ranges
//!
//! Values that appear in more than one EBB have a *global live range* which can be seen as the
//! disjoint union of the per-EBB local intervals for all of the EBBs where the value is live.
//! Together with a `ProgramOrder` which provides a linear ordering of the EBBs, the global live
//! range becomes a linear sequence of disjoint intervals, at most one per EBB.
//! Values that appear in more than one block have a *global live range* which can be seen as the
//! disjoint union of the per-block local intervals for all of the blocks where the value is live.
//! Together with a `ProgramOrder` which provides a linear ordering of the blocks, the global live
//! range becomes a linear sequence of disjoint intervals, at most one per block.
//!
//! In the special case of a dead value, the global live range is a single interval where the start
//! and end points are the same. The global live range of a value is never completely empty.
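//!
//! For example (numbering hypothetical), a value defined at `inst5` in `block1` and last
//! used at `inst20` in `block4` has a def interval running from `inst5` to the branch in
//! `block1` that reaches the use, plus live-in intervals covering `block4` up to `inst20`
//! and every intermediate block the value is live through.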
@@ -64,58 +64,58 @@
//! ## Current representation
//!
//! Our current implementation uses a sorted array of compressed intervals, represented by their
//! boundaries (Ebb, Inst), sorted by Ebb. This is a simple data structure, enables coalescing of
//! boundaries (Block, Inst), sorted by Block. This is a simple data structure, enables coalescing of
//! intervals easily, and shows some nice performance behavior. See
//! https://github.com/bytecodealliance/cranelift/issues/1084 for benchmarks against using a
//! bforest::Map<Ebb, Inst>.
//! bforest::Map<Block, Inst>.
//!
//! ## EBB ordering
//! ## Block ordering
//!
//! The relative order of EBBs is used to maintain a sorted list of live-in intervals and to
//! coalesce adjacent live-in intervals when the prior interval covers the whole EBB. This doesn't
//! The relative order of blocks is used to maintain a sorted list of live-in intervals and to
//! coalesce adjacent live-in intervals when the prior interval covers the whole block. This doesn't
//! depend on any property of the program order, so alternative orderings are possible:
//!
//! 1. The EBB layout order. This is what we currently use.
//! 1. The block layout order. This is what we currently use.
//! 2. A topological order of the dominator tree. All the live-in intervals would come after the
//! def interval.
//! 3. A numerical order by EBB number. Performant because it doesn't need to indirect through the
//! 3. A numerical order by block number. Performant because it doesn't need to indirect through the
//! `ProgramOrder` for comparisons.
//!
//! These orderings will cause small differences in coalescing opportunities, but all of them would
//! do a decent job of compressing a long live range. The numerical order might be preferable
//! because:
//!
//! - It has better performance because EBB numbers can be compared directly without any table
//! - It has better performance because block numbers can be compared directly without any table
//! lookups.
//! - If EBB numbers are not reused, it is safe to allocate new EBBs without getting spurious
//! live-in intervals from any coalesced representations that happen to cross a new EBB.
//! - If block numbers are not reused, it is safe to allocate new blocks without getting spurious
//! live-in intervals from any coalesced representations that happen to cross a new block.
//!
//! For comparing instructions, the layout order is always what we want.
//!
//! ## Alternative representation
//!
//! Since a local live-in interval always begins at its EBB header, it is uniquely described by its
//! end point instruction alone. We can use the layout to look up the EBB containing the end point.
//! Since a local live-in interval always begins at its block header, it is uniquely described by its
//! end point instruction alone. We can use the layout to look up the block containing the end point.
//! This means that a sorted `Vec<Inst>` would be enough to represent the set of live-in intervals.
//!
//! Coalescing is an important compression technique because some live ranges can span thousands of
//! EBBs. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where
//! an `[Ebb, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding
//! `Ebb` entry represents a single live-in interval.
//! blocks. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where
//! a `[Block, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding
//! `Block` entry represents a single live-in interval.
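//! For example (numbering hypothetical), live-in intervals `block2..inst7` (coalesced,
//! ending in a later block) and `block9..inst9` (a single-block interval) would be stored
//! as `[block2, inst7, inst9]`; the lone `inst9` entry implies `block9` via the layout.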
//!
//! This representation is more compact for a live range with many uncoalesced live-in intervals.
//! It is more complicated to work with, though, so it is probably not worth it. The performance
//! benefits of switching to a numerical EBB order only appears if the binary search is doing
//! EBB-EBB comparisons.
//! benefits of switching to a numerical block order only appear if the binary search is doing
//! block-block comparisons.
//!
//! A `BTreeMap<Ebb, Inst>` could have been used for the live-in intervals, but it doesn't provide
//! A `BTreeMap<Block, Inst>` could have been used for the live-in intervals, but it doesn't provide
//! the necessary API to make coalescing easy, nor does it optimize for our types' sizes.
//!
//! Even the specialized `bforest::Map<Ebb, Inst>` implementation is slower than a plain sorted
//! Even the specialized `bforest::Map<Block, Inst>` implementation is slower than a plain sorted
//! array, see https://github.com/bytecodealliance/cranelift/issues/1084 for details.
use crate::entity::SparseMapValue;
use crate::ir::{Ebb, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value};
use crate::ir::{Block, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value};
use crate::regalloc::affinity::Affinity;
use core::cmp::Ordering;
use core::marker::PhantomData;
@@ -124,14 +124,14 @@ use smallvec::SmallVec;
/// Global live range of a single SSA value.
///
/// As [explained in the module documentation](index.html#local-live-ranges), the live range of an
/// SSA value is the disjoint union of a set of intervals, each local to a single EBB, and with at
/// most one interval per EBB. We further distinguish between:
/// SSA value is the disjoint union of a set of intervals, each local to a single block, and with at
/// most one interval per block. We further distinguish between:
///
/// 1. The *def interval* is the local interval in the EBB where the value is defined, and
/// 2. The *live-in intervals* are the local intervals in the remaining EBBs.
/// 1. The *def interval* is the local interval in the block where the value is defined, and
/// 2. The *live-in intervals* are the local intervals in the remaining blocks.
///
/// A live-in interval always begins at the EBB header, while the def interval can begin at the
/// defining instruction, or at the EBB header for an EBB argument value.
/// A live-in interval always begins at the block header, while the def interval can begin at the
//! defining instruction, or at the block header for a block argument value.
///
/// All values have a def interval, but a large proportion of values don't have any live-in
/// intervals. These are called *local live ranges*.
@@ -139,11 +139,11 @@ use smallvec::SmallVec;
/// # Program order requirements
///
/// The internal representation of a `LiveRange` depends on a consistent `ProgramOrder` both for
/// ordering instructions inside an EBB *and* for ordering EBBs. The methods that depend on the
/// ordering instructions inside a block *and* for ordering blocks. The methods that depend on the
/// ordering take an explicit `ProgramOrder` object, and it is the caller's responsibility to
/// ensure that the provided ordering is consistent between calls.
///
/// In particular, changing the order of EBBs or inserting new EBBs will invalidate live ranges.
/// In particular, changing the order of blocks or inserting new blocks will invalidate live ranges.
///
/// Inserting new instructions in the layout is safe, but removing instructions is not. Besides the
/// instructions using or defining their value, `LiveRange` structs can contain references to
@@ -152,7 +152,7 @@ pub type LiveRange = GenericLiveRange<Layout>;
// See comment of liveins below.
pub struct Interval {
begin: Ebb,
begin: Block,
end: Inst,
}
@@ -168,10 +168,10 @@ pub struct GenericLiveRange<PO: ProgramOrder> {
/// The preferred register allocation for this value.
pub affinity: Affinity,
/// The instruction or EBB header where this value is defined.
/// The instruction or block header where this value is defined.
def_begin: ProgramPoint,
/// The end point of the def interval. This must always belong to the same EBB as `def_begin`.
/// The end point of the def interval. This must always belong to the same block as `def_begin`.
///
/// We always have `def_begin <= def_end` with equality implying a dead def live range with no
/// uses.
@@ -179,12 +179,12 @@ pub struct GenericLiveRange<PO: ProgramOrder> {
/// Additional live-in intervals sorted in program order.
///
/// This vector is empty for most values which are only used in one EBB.
/// This vector is empty for most values which are only used in one block.
///
/// An entry `ebb -> inst` means that the live range is live-in to `ebb`, continuing up to
/// `inst` which may belong to a later EBB in the program order.
/// An entry `block -> inst` means that the live range is live-in to `block`, continuing up to
/// `inst` which may belong to a later block in the program order.
///
/// The entries are non-overlapping, and none of them overlap the EBB where the value is
/// The entries are non-overlapping, and none of them overlap the block where the value is
/// defined.
liveins: SmallVec<[Interval; 2]>,
@@ -210,7 +210,7 @@ macro_rules! cmp {
impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Create a new live range for `value` defined at `def`.
///
/// The live range will be created as dead, but it can be extended with `extend_in_ebb()`.
/// The live range will be created as dead, but it can be extended with `extend_in_block()`.
pub fn new(value: Value, def: ProgramPoint, affinity: Affinity) -> Self {
Self {
value,
@@ -222,14 +222,14 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
}
/// Finds an entry in the compressed set of live-in intervals that contains `ebb`, or return
/// Finds an entry in the compressed set of live-in intervals that contains `block`, or return
/// the position where to insert such a new entry.
fn lookup_entry_containing_ebb(&self, ebb: Ebb, order: &PO) -> Result<usize, usize> {
fn lookup_entry_containing_block(&self, block: Block, order: &PO) -> Result<usize, usize> {
self.liveins
.binary_search_by(|interval| order.cmp(interval.begin, ebb))
.binary_search_by(|interval| order.cmp(interval.begin, block))
.or_else(|n| {
// The previous interval's end might cover the searched ebb.
if n > 0 && cmp!(order, ebb <= self.liveins[n - 1].end) {
// The previous interval's end might cover the searched block.
if n > 0 && cmp!(order, block <= self.liveins[n - 1].end) {
Ok(n - 1)
} else {
Err(n)
@@ -237,23 +237,23 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
})
}
/// Extend the local interval for `ebb` so it reaches `to` which must belong to `ebb`.
/// Extend the local interval for `block` so it reaches `to` which must belong to `block`.
/// Create a live-in interval if necessary.
///
/// If the live range already has a local interval in `ebb`, extend its end point so it
/// If the live range already has a local interval in `block`, extend its end point so it
/// includes `to`, and return false.
///
/// If the live range did not previously have a local interval in `ebb`, add one so the value
/// is live-in to `ebb`, extending to `to`. Return true.
/// If the live range did not previously have a local interval in `block`, add one so the value
/// is live-in to `block`, extending to `to`. Return true.
///
/// The return value can be used to detect if we just learned that the value is live-in to
/// `ebb`. This can trigger recursive extensions in `ebb`'s CFG predecessor blocks.
pub fn extend_in_ebb(&mut self, ebb: Ebb, inst: Inst, order: &PO) -> bool {
/// `block`. This can trigger recursive extensions in `block`'s CFG predecessor blocks.
pub fn extend_in_block(&mut self, block: Block, inst: Inst, order: &PO) -> bool {
// First check if we're extending the def interval.
//
// We're assuming here that `inst` never precedes `def_begin` in the same EBB, but we can't
// check it without a method for getting `inst`'s EBB.
if cmp!(order, ebb <= self.def_end) && cmp!(order, inst >= self.def_begin) {
// We're assuming here that `inst` never precedes `def_begin` in the same block, but we can't
// check it without a method for getting `inst`'s block.
if cmp!(order, block <= self.def_end) && cmp!(order, inst >= self.def_begin) {
let inst_pp = inst.into();
debug_assert_ne!(
inst_pp, self.def_begin,
@@ -266,7 +266,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
// Now check if we're extending any of the existing live-in intervals.
match self.lookup_entry_containing_ebb(ebb, order) {
match self.lookup_entry_containing_block(block, order) {
Ok(n) => {
// We found one interval and might need to extend it.
if cmp!(order, inst <= self.liveins[n].end) {
@@ -278,7 +278,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
// coalesce the two intervals:
// [ival.begin; ival.end] + [next.begin; next.end] = [ival.begin; next.end]
if let Some(next) = &self.liveins.get(n + 1) {
if order.is_ebb_gap(inst, next.begin) {
if order.is_block_gap(inst, next.begin) {
// At this point we can choose to remove the current interval or the next
// one; remove the next one to avoid one memory move.
let next_end = next.end;
@@ -295,17 +295,17 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
Err(n) => {
// No interval was found containing the current EBB: we need to insert a new one,
// No interval was found containing the current block: we need to insert a new one,
// unless there's a coalescing opportunity with the previous or next one.
let coalesce_next = self
.liveins
.get(n)
.filter(|next| order.is_ebb_gap(inst, next.begin))
.filter(|next| order.is_block_gap(inst, next.begin))
.is_some();
let coalesce_prev = self
.liveins
.get(n.wrapping_sub(1))
.filter(|prev| order.is_ebb_gap(prev.end, ebb))
.filter(|prev| order.is_block_gap(prev.end, block))
.is_some();
match (coalesce_prev, coalesce_next) {
@@ -324,8 +324,8 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
self.liveins[n - 1].end = inst;
}
(false, true) => {
debug_assert!(cmp!(order, ebb <= self.liveins[n].begin));
self.liveins[n].begin = ebb;
debug_assert!(cmp!(order, block <= self.liveins[n].begin));
self.liveins[n].begin = block;
}
(false, false) => {
@@ -333,7 +333,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
self.liveins.insert(
n,
Interval {
begin: ebb,
begin: block,
end: inst,
},
);
@@ -355,15 +355,15 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Is this a local live range?
///
/// A local live range is only used in the same EBB where it was defined. It is allowed to span
/// multiple basic blocks within that EBB.
/// A local live range is only used in the same block where it was defined. It is allowed to span
/// multiple basic blocks within that block.
pub fn is_local(&self) -> bool {
self.liveins.is_empty()
}
/// Get the program point where this live range is defined.
///
/// This will be an EBB header when the value is an EBB argument, otherwise it is the defining
/// This will be a block header when the value is a block argument, otherwise it is the defining
/// instruction.
pub fn def(&self) -> ProgramPoint {
self.def_begin
@@ -371,33 +371,33 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Move the definition of this value to a new program point.
///
/// It is only valid to move the definition within the same EBB, and it can't be moved beyond
/// It is only valid to move the definition within the same block, and it can't be moved beyond
/// `def_local_end()`.
pub fn move_def_locally(&mut self, def: ProgramPoint) {
self.def_begin = def;
}
/// Get the local end-point of this live range in the EBB where it is defined.
/// Get the local end-point of this live range in the block where it is defined.
///
/// This can be the EBB header itself in the case of a dead EBB argument.
/// This can be the block header itself in the case of a dead block argument.
/// Otherwise, it will be the last local use or branch/jump that can reach a use.
pub fn def_local_end(&self) -> ProgramPoint {
self.def_end
}
/// Get the local end-point of this live range in an EBB where it is live-in.
/// Get the local end-point of this live range in a block where it is live-in.
///
/// If this live range is not live-in to `ebb`, return `None`. Otherwise, return the end-point
/// of this live range's local interval in `ebb`.
/// If this live range is not live-in to `block`, return `None`. Otherwise, return the end-point
/// of this live range's local interval in `block`.
///
/// If the live range is live through all of `ebb`, the terminator of `ebb` is a correct
/// If the live range is live through all of `block`, the terminator of `block` is a correct
/// answer, but it is also possible that an even later program point is returned. So don't
/// depend on the returned `Inst` to belong to `ebb`.
pub fn livein_local_end(&self, ebb: Ebb, order: &PO) -> Option<Inst> {
self.lookup_entry_containing_ebb(ebb, order)
/// depend on the returned `Inst` to belong to `block`.
pub fn livein_local_end(&self, block: Block, order: &PO) -> Option<Inst> {
self.lookup_entry_containing_block(block, order)
.and_then(|i| {
let inst = self.liveins[i].end;
if cmp!(order, ebb < inst) {
if cmp!(order, block < inst) {
Ok(inst)
} else {
// Can be any error type, really, since it's discarded by ok().
@@ -407,25 +407,25 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
.ok()
}
/// Is this value live-in to `ebb`?
/// Is this value live-in to `block`?
///
/// An EBB argument is not considered to be live in.
pub fn is_livein(&self, ebb: Ebb, order: &PO) -> bool {
self.livein_local_end(ebb, order).is_some()
/// A block argument is not considered to be live-in.
pub fn is_livein(&self, block: Block, order: &PO) -> bool {
self.livein_local_end(block, order).is_some()
}
/// Get all the live-in intervals.
///
/// Note that the intervals are stored in a compressed form so each entry may span multiple
/// EBBs where the value is live in.
pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Ebb, Inst)> + 'a {
/// blocks where the value is live-in.
pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Block, Inst)> + 'a {
self.liveins
.iter()
.map(|interval| (interval.begin, interval.end))
}
/// Check if this live range overlaps a definition in `ebb`.
pub fn overlaps_def(&self, def: ExpandedProgramPoint, ebb: Ebb, order: &PO) -> bool {
/// Check if this live range overlaps a definition in `block`.
pub fn overlaps_def(&self, def: ExpandedProgramPoint, block: Block, order: &PO) -> bool {
// Two defs at the same program point always overlap, even if one is dead.
if def == self.def_begin.into() {
return true;
@@ -437,29 +437,29 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
// Check for an overlap with a live-in range.
match self.livein_local_end(ebb, order) {
match self.livein_local_end(block, order) {
Some(inst) => cmp!(order, def < inst),
None => false,
}
}
/// Check if this live range reaches a use at `user` in `ebb`.
pub fn reaches_use(&self, user: Inst, ebb: Ebb, order: &PO) -> bool {
/// Check if this live range reaches a use at `user` in `block`.
pub fn reaches_use(&self, user: Inst, block: Block, order: &PO) -> bool {
// Check for an overlap with the local range.
if cmp!(order, user > self.def_begin) && cmp!(order, user <= self.def_end) {
return true;
}
// Check for an overlap with a live-in range.
match self.livein_local_end(ebb, order) {
match self.livein_local_end(block, order) {
Some(inst) => cmp!(order, user <= inst),
None => false,
}
}
/// Check if this live range is killed at `user` in `ebb`.
pub fn killed_at(&self, user: Inst, ebb: Ebb, order: &PO) -> bool {
self.def_local_end() == user.into() || self.livein_local_end(ebb, order) == Some(user)
/// Check if this live range is killed at `user` in `block`.
pub fn killed_at(&self, user: Inst, block: Block, order: &PO) -> bool {
self.def_local_end() == user.into() || self.livein_local_end(block, order) == Some(user)
}
}
@@ -474,15 +474,15 @@ impl<PO: ProgramOrder> SparseMapValue<Value> for GenericLiveRange<PO> {
mod tests {
use super::{GenericLiveRange, Interval};
use crate::entity::EntityRef;
use crate::ir::{Ebb, Inst, Value};
use crate::ir::{Block, Inst, Value};
use crate::ir::{ExpandedProgramPoint, ProgramOrder};
use alloc::vec::Vec;
use core::cmp::Ordering;
// Dummy program order which simply compares indexes.
// It is assumed that EBBs have indexes that are multiples of 10, and instructions have indexes
// in between. `is_ebb_gap` assumes that terminator instructions have indexes of the form
// ebb * 10 + 1. This is used in the coalesce test.
// It is assumed that blocks have indexes that are multiples of 10, and instructions have indexes
// in between. `is_block_gap` assumes that terminator instructions have indexes of the form
// block * 10 + 1. This is used in the coalesce test.
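// For example, inst 21 is the terminator of block 20, and is_block_gap(inst21, block30)
// holds because 21 % 10 == 1 and 30 / 10 == 21 / 10 + 1.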
struct ProgOrder {}
impl ProgramOrder for ProgOrder {
@@ -494,7 +494,7 @@ mod tests {
fn idx(pp: ExpandedProgramPoint) -> usize {
match pp {
ExpandedProgramPoint::Inst(i) => i.index(),
ExpandedProgramPoint::Ebb(e) => e.index(),
ExpandedProgramPoint::Block(e) => e.index(),
}
}
@@ -503,31 +503,31 @@ mod tests {
ia.cmp(&ib)
}
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool {
inst.index() % 10 == 1 && ebb.index() / 10 == inst.index() / 10 + 1
fn is_block_gap(&self, inst: Inst, block: Block) -> bool {
inst.index() % 10 == 1 && block.index() / 10 == inst.index() / 10 + 1
}
}
impl ProgOrder {
// Get the EBB corresponding to `inst`.
fn inst_ebb(&self, inst: Inst) -> Ebb {
// Get the block corresponding to `inst`.
fn inst_block(&self, inst: Inst) -> Block {
let i = inst.index();
Ebb::new(i - i % 10)
Block::new(i - i % 10)
}
// Get the EBB of a program point.
fn pp_ebb<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Ebb {
// Get the block of a program point.
fn pp_block<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Block {
match pp.into() {
ExpandedProgramPoint::Inst(i) => self.inst_ebb(i),
ExpandedProgramPoint::Ebb(e) => e,
ExpandedProgramPoint::Inst(i) => self.inst_block(i),
ExpandedProgramPoint::Block(e) => e,
}
}
// Validate the live range invariants.
fn validate(&self, lr: &GenericLiveRange<Self>) {
// The def interval must cover a single EBB.
let def_ebb = self.pp_ebb(lr.def_begin);
assert_eq!(def_ebb, self.pp_ebb(lr.def_end));
// The def interval must cover a single block.
let def_block = self.pp_block(lr.def_begin);
assert_eq!(def_block, self.pp_block(lr.def_end));
// Check that the def interval isn't backwards.
match self.cmp(lr.def_begin, lr.def_end) {
@@ -552,7 +552,7 @@ mod tests {
assert!(
self.cmp(lr.def_end, begin) == Ordering::Less
|| self.cmp(lr.def_begin, end) == Ordering::Greater,
"Interval can't overlap the def EBB"
"Interval can't overlap the def block"
);
// Save for next round.
@@ -567,10 +567,10 @@ mod tests {
#[test]
fn dead_def_range() {
let v0 = Value::new(0);
let e0 = Ebb::new(0);
let e0 = Block::new(0);
let i1 = Inst::new(1);
let i2 = Inst::new(2);
let e2 = Ebb::new(2);
let e2 = Block::new(2);
let lr = GenericLiveRange::new(v0, i1.into(), Default::default());
assert!(lr.is_dead());
assert!(lr.is_local());
@@ -588,13 +588,13 @@ mod tests {
#[test]
fn dead_arg_range() {
let v0 = Value::new(0);
let e2 = Ebb::new(2);
let e2 = Block::new(2);
let lr = GenericLiveRange::new(v0, e2.into(), Default::default());
assert!(lr.is_dead());
assert!(lr.is_local());
assert_eq!(lr.def(), e2.into());
assert_eq!(lr.def_local_end(), e2.into());
// The def interval of an EBB argument does not count as live-in.
// The def interval of a block argument does not count as live-in.
assert_eq!(lr.livein_local_end(e2, PO), None);
PO.validate(&lr);
}
@@ -602,13 +602,13 @@ mod tests {
#[test]
fn local_def() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let i13 = Inst::new(13);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e10, i13, PO), false);
assert_eq!(lr.extend_in_block(e10, i13, PO), false);
PO.validate(&lr);
assert!(!lr.is_dead());
assert!(lr.is_local());
@@ -616,7 +616,7 @@ mod tests {
assert_eq!(lr.def_local_end(), i13.into());
// Extending to an already covered inst should not change anything.
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), i11.into());
assert_eq!(lr.def_local_end(), i13.into());
@@ -625,15 +625,15 @@ mod tests {
#[test]
fn local_arg() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let i13 = Inst::new(13);
let mut lr = GenericLiveRange::new(v0, e10.into(), Default::default());
// Extending a dead EBB argument in its own block should not indicate that a live-in
// Extending a dead block argument in its own block should not indicate that a live-in
// interval was created.
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
PO.validate(&lr);
assert!(!lr.is_dead());
assert!(lr.is_local());
@@ -641,13 +641,13 @@ mod tests {
assert_eq!(lr.def_local_end(), i12.into());
// Extending to an already covered inst should not change anything.
assert_eq!(lr.extend_in_ebb(e10, i11, PO), false);
assert_eq!(lr.extend_in_block(e10, i11, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), e10.into());
assert_eq!(lr.def_local_end(), i12.into());
// Extending further.
assert_eq!(lr.extend_in_ebb(e10, i13, PO), false);
assert_eq!(lr.extend_in_block(e10, i13, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), e10.into());
assert_eq!(lr.def_local_end(), i13.into());
@@ -656,28 +656,28 @@ mod tests {
#[test]
fn global_def() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let e20 = Ebb::new(20);
let e20 = Block::new(20);
let i21 = Inst::new(21);
let i22 = Inst::new(22);
let i23 = Inst::new(23);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
// Adding a live-in interval.
assert_eq!(lr.extend_in_ebb(e20, i22, PO), true);
assert_eq!(lr.extend_in_block(e20, i22, PO), true);
PO.validate(&lr);
assert_eq!(lr.livein_local_end(e20, PO), Some(i22));
// Non-extending the live-in.
assert_eq!(lr.extend_in_ebb(e20, i21, PO), false);
assert_eq!(lr.extend_in_block(e20, i21, PO), false);
assert_eq!(lr.livein_local_end(e20, PO), Some(i22));
// Extending the existing live-in.
assert_eq!(lr.extend_in_ebb(e20, i23, PO), false);
assert_eq!(lr.extend_in_block(e20, i23, PO), false);
PO.validate(&lr);
assert_eq!(lr.livein_local_end(e20, PO), Some(i23));
}
@@ -686,35 +686,35 @@ mod tests {
fn coalesce() {
let v0 = Value::new(0);
let i11 = Inst::new(11);
let e20 = Ebb::new(20);
let e20 = Block::new(20);
let i21 = Inst::new(21);
let e30 = Ebb::new(30);
let e30 = Block::new(30);
let i31 = Inst::new(31);
let e40 = Ebb::new(40);
let e40 = Block::new(40);
let i41 = Inst::new(41);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true);
assert_eq!(lr.extend_in_block(e30, i31, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i31)]);
// Coalesce to previous
assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true);
assert_eq!(lr.extend_in_block(e40, i41, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i41)]);
// Coalesce to next
assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true);
assert_eq!(lr.extend_in_block(e20, i21, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true);
assert_eq!(lr.extend_in_block(e40, i41, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e40, i41)]);
assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true);
assert_eq!(lr.extend_in_block(e20, i21, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i21), (e40, i41)]);
// Coalesce to previous and next
assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true);
assert_eq!(lr.extend_in_block(e30, i31, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]);
}
}

View File

@@ -13,7 +13,7 @@ use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::entity::{SparseMap, SparseMapValue};
use crate::ir::{AbiParam, ArgumentLoc, InstBuilder};
use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueLoc};
use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueLoc};
use crate::isa::RegClass;
use crate::isa::{ConstraintKind, EncInfo, Encoding, RecipeConstraints, TargetIsa};
use crate::regalloc::affinity::Affinity;
@@ -113,24 +113,24 @@ impl SparseMapValue<Value> for ReloadedValue {
impl<'a> Context<'a> {
fn run(&mut self, tracker: &mut LiveValueTracker) {
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_ebb(ebb, tracker);
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_block(block, tracker);
}
}
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Reloading {}:", ebb);
self.visit_ebb_header(ebb, tracker);
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Reloading {}:", block);
self.visit_block_header(block, tracker);
tracker.drop_dead_params();
// visit_ebb_header() places us at the first interesting instruction in the EBB.
// visit_block_header() places us at the first interesting instruction in the block.
while let Some(inst) = self.cur.current_inst() {
if !self.cur.func.dfg[inst].opcode().is_ghost() {
// This instruction either has an encoding or has ABI constraints, so visit it to
// insert spills and fills as needed.
let encoding = self.cur.func.encodings[inst];
self.visit_inst(ebb, inst, encoding, tracker);
self.visit_inst(block, inst, encoding, tracker);
tracker.drop_dead(inst);
} else {
// This is a ghost instruction with no encoding and no extra constraints, so we can
@@ -140,29 +140,29 @@ impl<'a> Context<'a> {
}
}
/// Process the EBB parameters. Move to the next instruction in the EBB to be processed
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.ebb_top(
ebb,
/// Process the block parameters. Move to the next instruction in the block to be processed
fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree,
);
if self.cur.func.layout.entry_block() == Some(ebb) {
if self.cur.func.layout.entry_block() == Some(block) {
debug_assert_eq!(liveins.len(), 0);
self.visit_entry_params(ebb, args);
self.visit_entry_params(block, args);
} else {
self.visit_ebb_params(ebb, args);
self.visit_block_params(block, args);
}
}
/// Visit the parameters on the entry block.
/// These values have ABI constraints from the function signature.
fn visit_entry_params(&mut self, ebb: Ebb, args: &[LiveValue]) {
fn visit_entry_params(&mut self, block: Block, args: &[LiveValue]) {
debug_assert_eq!(self.cur.func.signature.params.len(), args.len());
self.cur.goto_first_inst(ebb);
self.cur.goto_first_inst(block);
for (arg_idx, arg) in args.iter().enumerate() {
let abi = self.cur.func.signature.params[arg_idx];
@@ -175,10 +175,10 @@ impl<'a> Context<'a> {
.cur
.func
.dfg
.replace_ebb_param(arg.value, abi.value_type);
.replace_block_param(arg.value, abi.value_type);
let affinity = Affinity::abi(&abi, self.cur.isa);
self.liveness.create_dead(reg, ebb, affinity);
self.insert_spill(ebb, arg.value, reg);
self.liveness.create_dead(reg, block, affinity);
self.insert_spill(block, arg.value, reg);
}
}
ArgumentLoc::Stack(_) => {
@@ -189,15 +189,15 @@ impl<'a> Context<'a> {
}
}
fn visit_ebb_params(&mut self, ebb: Ebb, _args: &[LiveValue]) {
self.cur.goto_first_inst(ebb);
fn visit_block_params(&mut self, block: Block, _args: &[LiveValue]) {
self.cur.goto_first_inst(block);
}
/// Process the instruction pointed to by `pos`, and advance the cursor to the next instruction
/// that needs processing.
fn visit_inst(
&mut self,
ebb: Ebb,
block: Block,
inst: Inst,
encoding: Encoding,
tracker: &mut LiveValueTracker,
@@ -265,7 +265,7 @@ impl<'a> Context<'a> {
{
self.reload_copy_candidates(inst);
} else {
self.reload_inst_candidates(ebb, inst);
self.reload_inst_candidates(block, inst);
}
// TODO: Reuse reloads for future instructions.
@@ -304,7 +304,7 @@ impl<'a> Context<'a> {
let value_type = self.cur.func.dfg.value_type(lv.value);
let reg = self.cur.func.dfg.replace_result(lv.value, value_type);
self.liveness.create_dead(reg, inst, Affinity::new(op));
self.insert_spill(ebb, lv.value, reg);
self.insert_spill(block, lv.value, reg);
}
}
}
@@ -333,14 +333,14 @@ impl<'a> Context<'a> {
let reg = self.cur.func.dfg.replace_result(lv.value, abi.value_type);
self.liveness
.create_dead(reg, inst, Affinity::abi(&abi, self.cur.isa));
self.insert_spill(ebb, lv.value, reg);
self.insert_spill(block, lv.value, reg);
}
}
}
}
// Reload the current candidates for the given `inst`.
fn reload_inst_candidates(&mut self, ebb: Ebb, inst: Inst) {
fn reload_inst_candidates(&mut self, block: Block, inst: Inst) {
// Insert fill instructions before `inst` and replace `cand.value` with the filled value.
for cand in self.candidates.iter_mut() {
if let Some(reload) = self.reloads.get(cand.value) {
@@ -361,15 +361,15 @@ impl<'a> Context<'a> {
let affinity = Affinity::Reg(cand.regclass.into());
self.liveness.create_dead(reg, fill, affinity);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
.extend_locally(reg, block, inst, &self.cur.func.layout);
}
// Rewrite instruction arguments.
//
// Only rewrite those arguments that were identified as candidates. This leaves EBB
// arguments on branches as-is without rewriting them. A spilled EBB argument needs to stay
// spilled because the matching EBB parameter is going to be in the same virtual register
// and therefore the same stack slot as the EBB argument value.
// Only rewrite those arguments that were identified as candidates. This leaves block
// arguments on branches as-is without rewriting them. A spilled block argument needs to stay
// spilled because the matching block parameter is going to be in the same virtual register
// and therefore the same stack slot as the block argument value.
if !self.candidates.is_empty() {
let args = self.cur.func.dfg.inst_args_mut(inst);
while let Some(cand) = self.candidates.pop() {
@@ -448,14 +448,14 @@ impl<'a> Context<'a> {
/// - Insert `stack = spill reg` at `pos`, and assign an encoding.
/// - Move the `stack` live range starting point to the new instruction.
/// - Extend the `reg` live range to reach the new instruction.
fn insert_spill(&mut self, ebb: Ebb, stack: Value, reg: Value) {
fn insert_spill(&mut self, block: Block, stack: Value, reg: Value) {
self.cur.ins().with_result(stack).spill(reg);
let inst = self.cur.built_inst();
// Update live ranges.
self.liveness.move_def_locally(stack, inst);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
.extend_locally(reg, block, inst, &self.cur.func.layout);
}
}

View File

@@ -32,7 +32,7 @@ fn insert_and_encode_safepoint<'f>(
}
// The emit_stackmaps() function analyzes each instruction to retrieve the liveness of
// the defs and operands by traversing a function's ebbs in layout order.
// the defs and operands by traversing a function's blocks in layout order.
pub fn emit_stackmaps(
func: &mut Function,
domtree: &DominatorTree,
@@ -42,13 +42,13 @@ pub fn emit_stackmaps(
) {
let mut curr = func.layout.entry_block();
while let Some(ebb) = curr {
tracker.ebb_top(ebb, &func.dfg, liveness, &func.layout, domtree);
while let Some(block) = curr {
tracker.block_top(block, &func.dfg, liveness, &func.layout, domtree);
tracker.drop_dead_params();
let mut pos = FuncCursor::new(func);
// From the top of the ebb, step through the instructions.
pos.goto_top(ebb);
// From the top of the block, step through the instructions.
pos.goto_top(block);
while let Some(inst) = pos.next_inst() {
if let InstructionData::Trap {
@@ -67,6 +67,6 @@ pub fn emit_stackmaps(
tracker.process_inst(inst, &pos.func.dfg, liveness);
tracker.drop_dead(inst);
}
curr = func.layout.next_ebb(ebb);
curr = func.layout.next_block(block);
}
}

View File

@@ -34,20 +34,20 @@
//! # Register diversions and global interference
//!
//! We can divert register values temporarily to satisfy constraints, but we need to put the
//! values back into their originally assigned register locations before leaving the EBB.
//! Otherwise, values won't be in the right register at the entry point of other EBBs.
//! values back into their originally assigned register locations before leaving the block.
//! Otherwise, values won't be in the right register at the entry point of other blocks.
//!
//! Some values are *local*, and we don't need to worry about putting those values back since they
//! are not used in any other EBBs.
//! are not used in any other blocks.
//!
//! When we assign register locations to defines, we are assigning both the register used locally
//! immediately after the instruction and the register used globally when the defined value is used
//! in a different EBB. We need to avoid interference both locally at the instruction and globally.
//! in a different block. We need to avoid interference both locally at the instruction and globally.
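//!
//! For example (register names hypothetical), if `v3` is globally assigned `%rax` but the
//! current instruction needs `%rax` for a fixed operand, we can divert `v3` into a free
//! register such as `%rcx`, provided the diversion is undone before the block's terminator
//! so other blocks still find `v3` in `%rax`.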
//!
//! We have multiple mappings of values to registers:
//!
//! 1. The initial local mapping before the instruction. This includes any diversions from previous
//! instructions in the EBB, but not diversions for the current instruction.
//! instructions in the block, but not diversions for the current instruction.
//! 2. The local mapping after applying the additional reassignments required to satisfy the
//! constraints of the current instruction.
//! 3. The local mapping after the instruction. This excludes values killed by the instruction and

View File

@@ -17,7 +17,7 @@
use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::ir::{ArgumentLoc, Ebb, Function, Inst, InstBuilder, SigRef, Value, ValueLoc};
use crate::ir::{ArgumentLoc, Block, Function, Inst, InstBuilder, SigRef, Value, ValueLoc};
use crate::isa::registers::{RegClass, RegClassIndex, RegClassMask, RegUnit};
use crate::isa::{ConstraintKind, EncInfo, RecipeConstraints, RegInfo, TargetIsa};
use crate::regalloc::affinity::Affinity;
@@ -121,22 +121,22 @@ impl Spilling {
impl<'a> Context<'a> {
fn run(&mut self, tracker: &mut LiveValueTracker) {
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_ebb(ebb, tracker);
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_block(block, tracker);
}
}
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Spilling {}:", ebb);
self.cur.goto_top(ebb);
self.visit_ebb_header(ebb, tracker);
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Spilling {}:", block);
self.cur.goto_top(block);
self.visit_block_header(block, tracker);
tracker.drop_dead_params();
self.process_spills(tracker);
while let Some(inst) = self.cur.next_inst() {
if !self.cur.func.dfg[inst].opcode().is_ghost() {
self.visit_inst(inst, ebb, tracker);
self.visit_inst(inst, block, tracker);
} else {
let (_throughs, kills) = tracker.process_ghost(inst);
self.free_regs(kills);
@@ -185,9 +185,9 @@ impl<'a> Context<'a> {
}
}
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, params) = tracker.ebb_top(
ebb,
fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) {
let (liveins, params) = tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
@@ -199,26 +199,26 @@ impl<'a> Context<'a> {
self.pressure.reset();
self.take_live_regs(liveins);
// An EBB can have an arbitrary (up to 2^16...) number of parameters, so they are not
// A block can have an arbitrary (up to 2^16...) number of parameters, so they are not
// guaranteed to fit in registers.
for lv in params {
if let Affinity::Reg(rci) = lv.affinity {
let rc = self.reginfo.rc(rci);
'try_take: while let Err(mask) = self.pressure.take_transient(rc) {
debug!("Need {} reg for EBB param {}", rc, lv.value);
debug!("Need {} reg for block param {}", rc, lv.value);
match self.spill_candidate(mask, liveins) {
Some(cand) => {
debug!(
"Spilling live-in {} to make room for {} EBB param {}",
"Spilling live-in {} to make room for {} block param {}",
cand, rc, lv.value
);
self.spill_reg(cand);
}
None => {
// We can't spill any of the live-in registers, so we have to spill an
// EBB argument. Since the current spill metric would consider all the
// EBB arguments equal, just spill the present register.
debug!("Spilling {} EBB argument {}", rc, lv.value);
// block argument. Since the current spill metric would consider all the
// block arguments equal, just spill the present register.
debug!("Spilling {} block argument {}", rc, lv.value);
// Since `spill_reg` will free a register, add the current one here.
self.pressure.take(rc);
@@ -230,15 +230,15 @@ impl<'a> Context<'a> {
}
}
// The transient pressure counts for the EBB arguments are accurate. Just preserve them.
// The transient pressure counts for the block arguments are accurate. Just preserve them.
self.pressure.preserve_transient();
self.free_dead_regs(params);
}
fn visit_inst(&mut self, inst: Inst, ebb: Ebb, tracker: &mut LiveValueTracker) {
fn visit_inst(&mut self, inst: Inst, block: Block, tracker: &mut LiveValueTracker) {
debug!("Inst {}, {}", self.cur.display_inst(inst), self.pressure);
debug_assert_eq!(self.cur.current_inst(), Some(inst));
debug_assert_eq!(self.cur.current_ebb(), Some(ebb));
debug_assert_eq!(self.cur.current_block(), Some(block));
let constraints = self
.encinfo
@@ -246,7 +246,7 @@ impl<'a> Context<'a> {
// We may need to resolve register constraints if there are any noteworthy uses.
debug_assert!(self.reg_uses.is_empty());
self.collect_reg_uses(inst, ebb, constraints);
self.collect_reg_uses(inst, block, constraints);
// Calls usually have fixed register uses.
let call_sig = self.cur.func.dfg.call_signature(inst);
@@ -313,7 +313,12 @@ impl<'a> Context<'a> {
// We are assuming here that if a value is used both by a fixed register operand and a register
// class operand, the two are compatible. We are also assuming that two register class
// operands are always compatible.
fn collect_reg_uses(&mut self, inst: Inst, ebb: Ebb, constraints: Option<&RecipeConstraints>) {
fn collect_reg_uses(
&mut self,
inst: Inst,
block: Block,
constraints: Option<&RecipeConstraints>,
) {
let args = self.cur.func.dfg.inst_args(inst);
let num_fixed_ins = if let Some(constraints) = constraints {
for (idx, (op, &arg)) in constraints.ins.iter().zip(args).enumerate() {
@@ -324,11 +329,11 @@ impl<'a> Context<'a> {
ConstraintKind::FixedReg(_) => reguse.fixed = true,
ConstraintKind::Tied(_) => {
// A tied operand must kill the used value.
reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout);
reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout);
}
ConstraintKind::FixedTied(_) => {
reguse.fixed = true;
reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout);
reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout);
}
ConstraintKind::Reg => {}
}
@@ -450,10 +455,10 @@ impl<'a> Context<'a> {
// Spill a live register that is *not* used by the current instruction.
// Spilling a use wouldn't help.
//
// Do allow spilling of EBB arguments on branches. This is safe since we spill
// the whole virtual register which includes the matching EBB parameter value
// Do allow spilling of block arguments on branches. This is safe since we spill
// the whole virtual register which includes the matching block parameter value
// at the branch destination. It is also necessary since there can be
// arbitrarily many EBB arguments.
// arbitrarily many block arguments.
match {
let args = if self.cur.func.dfg[inst].opcode().is_branch() {
self.cur.func.dfg.inst_fixed_args(inst)
@@ -572,7 +577,7 @@ impl<'a> Context<'a> {
self.liveness.create_dead(copy, inst, Affinity::Reg(rci));
self.liveness.extend_locally(
copy,
self.cur.func.layout.pp_ebb(inst),
self.cur.func.layout.pp_block(inst),
self.cur.current_inst().expect("must be at an instruction"),
&self.cur.func.layout,
);

View File

@@ -5,11 +5,11 @@
//! output.
//!
//! A virtual register is typically built by merging together SSA values that are "phi-related" -
//! that is, one value is passed as an EBB argument to a branch and the other is the EBB parameter
//! that is, one value is passed as a block argument to a branch and the other is the block parameter
//! value itself.
//!
//! If any values in a virtual register are spilled, they will use the same stack slot. This avoids
//! memory-to-memory copies when a spilled value is passed as an EBB argument.
//! memory-to-memory copies when a spilled value is passed as a block argument.
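//!
//! For example (value and block numbers hypothetical), in `brnz v1, block2(v5)` the
//! argument `v5` and the corresponding parameter of `block2` are phi-related and end up
//! in the same virtual register, so spilling either one gives both the same stack slot
//! and the branch needs no memory-to-memory copy.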
use crate::dbg::DisplayList;
use crate::dominator_tree::DominatorTreePreorder;