Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) used to represent the basic-block
subcomponent of an Ebb that is a predecessor of another Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated any comments that refer to BasicBlock: eventually we
will remove BlockPredecessor and replace it with Block, which is a basic
block, so those comments will become correct.
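For reference, a minimal sketch of the renamed pair, using the field names
that appear in the diffs below (`Block` being the entity formerly named
`Ebb`):

    pub struct BlockPredecessor {
        /// The block containing the branch or jump instruction.
        pub block: Block,
        /// The branch or jump instruction that makes `block` a predecessor.
        pub inst: Inst,
    }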

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with their
associated identifiers, would conflict in a later commit, so they are
given more verbose names here.
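A sketch of the kind of rename involved (the ssa.rs hunk is not part of
this view, so `SSABlock`/`SSABlockData` are illustrative stand-ins for the
more verbose names):

    // Before: these identifiers collide with ir::Block after the mass rename.
    pub struct Block(u32);
    pub struct BlockData;

    // After (illustrative names only):
    pub struct SSABlock(u32);
    pub struct SSABlockData;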

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs
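For example, the mechanical effect of these renames on one line of Rust,
taken from the legalizer diff below:

    // before
    let orig_ebb = pos.current_ebb().unwrap();
    // after
    let orig_block = pos.current_block().unwrap();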

* Manually update comment for `Block`

The Wikipedia reference in `Block`'s doc comment required an update.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'
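An illustrative before/after (hypothetical directives, not taken from a
specific test):

    ; before
    ; regex: EBB=ebb\d+
    ; check: jump $EBB

    ; after
    ; regex: BB=block\d+
    ; check: jump $BB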

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters, so a name that grew
during the rename was truncated and failed a filecheck test. An outdated
comment was also fixed.
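For illustration (hypothetical name): %many_ebb_params fits in 15
characters, but the renamed %many_block_params is 17 characters, so it
would be truncated to %many_block_param in the printed output, and a
filecheck pattern expecting the full name would no longer match.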
Author: Ryan Hunt
Date:   2020-02-07 10:46:47 -06:00 (committed by GitHub)
Parent: a136d1cb00
Commit: 832666c45e

370 changed files with 8090 additions and 7988 deletions


@@ -148,22 +148,22 @@ mod tests {
use super::*;
use crate::entity::EntityRef;
/// An opaque reference to an extended basic block in a function.
/// An opaque reference to a basic block in a function.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Ebb(u32);
entity_impl!(Ebb, "ebb");
pub struct Block(u32);
entity_impl!(Block, "block");
#[test]
fn comparator() {
let ebb1 = Ebb::new(1);
let ebb2 = Ebb::new(2);
let ebb3 = Ebb::new(3);
let ebb4 = Ebb::new(4);
let vals = [ebb1, ebb2, ebb4];
let block1 = Block::new(1);
let block2 = Block::new(2);
let block3 = Block::new(3);
let block4 = Block::new(4);
let vals = [block1, block2, block4];
let comp = ();
assert_eq!(comp.search(ebb1, &vals), Ok(0));
assert_eq!(comp.search(ebb3, &vals), Err(2));
assert_eq!(comp.search(ebb4, &vals), Ok(2));
assert_eq!(comp.search(block1, &vals), Ok(0));
assert_eq!(comp.search(block3, &vals), Err(2));
assert_eq!(comp.search(block4, &vals), Ok(2));
}
#[test]


@@ -708,8 +708,8 @@ macro_rules! def {
}
// Helper macro to define legalization recipes.
macro_rules! ebb {
// An basic block definition, splitting the current block in 2.
macro_rules! block {
// A basic block definition, splitting the current block in 2.
($block: ident) => {
ExprBuilder::block($block).assign_to(Vec::new())
};


@@ -112,7 +112,7 @@ pub(crate) struct InstructionContent {
/// Indices in operands_out of output operands that are values.
pub value_results: Vec<usize>,
/// True for instructions that terminate the EBB.
/// True for instructions that terminate the block.
pub is_terminator: bool,
/// True for all branch or jump instructions.
pub is_branch: bool,


@@ -450,7 +450,7 @@ fn gen_opcodes(all_inst: &AllInstructions, fmt: &mut Formatter) {
all_inst,
|inst| inst.is_terminator,
"is_terminator",
"True for instructions that terminate the EBB",
"True for instructions that terminate the block",
fmt,
);
gen_bool_accessor(


@@ -474,7 +474,7 @@ fn gen_transform<'a>(
// If we are adding some blocks, we need to recall the original block, such that we can
// recompute it.
if !transform.block_pool.is_empty() {
fmt.line("let orig_ebb = pos.current_ebb().unwrap();");
fmt.line("let orig_block = pos.current_block().unwrap();");
}
// If we're going to delete `inst`, we need to detach its results first so they can be
@@ -486,14 +486,14 @@ fn gen_transform<'a>(
// Emit new block creation.
for block in &transform.block_pool {
let var = transform.var_pool.get(block.name);
fmtln!(fmt, "let {} = pos.func.dfg.make_ebb();", var.name);
fmtln!(fmt, "let {} = pos.func.dfg.make_block();", var.name);
}
// Emit the destination pattern.
for &def_index in &transform.dst {
if let Some(block) = transform.block_pool.get(def_index) {
let var = transform.var_pool.get(block.name);
fmtln!(fmt, "pos.insert_ebb({});", var.name);
fmtln!(fmt, "pos.insert_block({});", var.name);
}
emit_dst_inst(
transform.def_pool.get(def_index),
@@ -507,7 +507,7 @@ fn gen_transform<'a>(
let def_next_index = transform.def_pool.next_index();
if let Some(block) = transform.block_pool.get(def_next_index) {
let var = transform.var_pool.get(block.name);
fmtln!(fmt, "pos.insert_ebb({});", var.name);
fmtln!(fmt, "pos.insert_block({});", var.name);
}
// Delete the original instruction if we didn't have an opportunity to replace it.
@@ -520,14 +520,14 @@ fn gen_transform<'a>(
if transform.def_pool.get(transform.src).apply.inst.is_branch {
// A branch might have been legalized into multiple branches, so we need to recompute
// the cfg.
fmt.line("cfg.recompute_ebb(pos.func, pos.current_ebb().unwrap());");
fmt.line("cfg.recompute_block(pos.func, pos.current_block().unwrap());");
}
} else {
// Update CFG for the new blocks.
fmt.line("cfg.recompute_ebb(pos.func, orig_ebb);");
fmt.line("cfg.recompute_block(pos.func, orig_block);");
for block in &transform.block_pool {
let var = transform.var_pool.get(block.name);
fmtln!(fmt, "cfg.recompute_ebb(pos.func, {});", var.name);
fmtln!(fmt, "cfg.recompute_block(pos.func, {});", var.name);
}
}


@@ -6,9 +6,9 @@ fn new(format_field_name: &'static str, rust_type: &'static str, doc: &'static s
}
pub(crate) struct EntityRefs {
/// A reference to an extended basic block in the same function.
/// A reference to a basic block in the same function.
/// This is primarily used in control flow instructions.
pub(crate) ebb: OperandKind,
pub(crate) block: OperandKind,
/// A reference to a stack slot declared in the function preamble.
pub(crate) stack_slot: OperandKind,
@@ -33,17 +33,17 @@ pub(crate) struct EntityRefs {
/// A reference to a table declared in the function preamble.
pub(crate) table: OperandKind,
/// A variable-sized list of value operands. Use for Ebb and function call arguments.
/// A variable-sized list of value operands. Use for Block and function call arguments.
pub(crate) varargs: OperandKind,
}
impl EntityRefs {
pub fn new() -> Self {
Self {
ebb: new(
block: new(
"destination",
"ir::Ebb",
"An extended basic block in the same function.",
"ir::Block",
"a basic block in the same function.",
),
stack_slot: new("stack_slot", "ir::StackSlot", "A stack slot"),
@@ -64,7 +64,7 @@ impl EntityRefs {
A variable size list of `value` operands.
Use this to represent arguments passed to a function call, arguments
passed to an extended basic block, or a variable number of results
passed to a basic block, or a variable number of results
returned from an instruction.
"#,
),


@@ -140,25 +140,25 @@ impl Formats {
.value()
.build(),
jump: Builder::new("Jump").imm(&entities.ebb).varargs().build(),
jump: Builder::new("Jump").imm(&entities.block).varargs().build(),
branch: Builder::new("Branch")
.value()
.imm(&entities.ebb)
.imm(&entities.block)
.varargs()
.build(),
branch_int: Builder::new("BranchInt")
.imm(&imm.intcc)
.value()
.imm(&entities.ebb)
.imm(&entities.block)
.varargs()
.build(),
branch_float: Builder::new("BranchFloat")
.imm(&imm.floatcc)
.value()
.imm(&entities.ebb)
.imm(&entities.block)
.varargs()
.build(),
@@ -166,13 +166,13 @@ impl Formats {
.imm(&imm.intcc)
.value()
.value()
.imm(&entities.ebb)
.imm(&entities.block)
.varargs()
.build(),
branch_table: Builder::new("BranchTable")
.value()
.imm(&entities.ebb)
.imm(&entities.block)
.imm(&entities.jump_table)
.build(),


@@ -18,8 +18,8 @@ fn define_control_flow(
imm: &Immediates,
entities: &EntityRefs,
) {
let EBB = &Operand::new("EBB", &entities.ebb).with_doc("Destination extended basic block");
let args = &Operand::new("args", &entities.varargs).with_doc("EBB arguments");
let block = &Operand::new("block", &entities.block).with_doc("Destination basic block");
let args = &Operand::new("args", &entities.varargs).with_doc("block arguments");
ig.push(
Inst::new(
@@ -27,13 +27,13 @@ fn define_control_flow(
r#"
Jump.
Unconditionally jump to an extended basic block, passing the specified
EBB arguments. The number and types of arguments must match the
destination EBB.
Unconditionally jump to a basic block, passing the specified
block arguments. The number and types of arguments must match the
destination block.
"#,
&formats.jump,
)
.operands_in(vec![EBB, args])
.operands_in(vec![block, args])
.is_terminator(true)
.is_branch(true),
);
@@ -42,9 +42,9 @@ fn define_control_flow(
Inst::new(
"fallthrough",
r#"
Fall through to the next EBB.
Fall through to the next block.
This is the same as `jump`, except the destination EBB must be
This is the same as `jump`, except the destination block must be
the next one in the layout.
Jumps are turned into fall-through instructions by the branch
@@ -53,7 +53,7 @@ fn define_control_flow(
"#,
&formats.jump,
)
.operands_in(vec![EBB, args])
.operands_in(vec![block, args])
.is_terminator(true)
.is_branch(true),
);
@@ -81,7 +81,7 @@ fn define_control_flow(
"#,
&formats.branch,
)
.operands_in(vec![c, EBB, args])
.operands_in(vec![c, block, args])
.is_branch(true),
);
@@ -96,7 +96,7 @@ fn define_control_flow(
"#,
&formats.branch,
)
.operands_in(vec![c, EBB, args])
.operands_in(vec![c, block, args])
.is_branch(true),
);
}
@@ -124,14 +124,14 @@ fn define_control_flow(
and take the branch if the condition is true:
```text
br_icmp ugt v1, v2, ebb4(v5, v6)
br_icmp ugt v1, v2, block4(v5, v6)
```
is semantically equivalent to:
```text
v10 = icmp ugt, v1, v2
brnz v10, ebb4(v5, v6)
brnz v10, block4(v5, v6)
```
Some RISC architectures like MIPS and RISC-V provide instructions that
@@ -140,7 +140,7 @@ fn define_control_flow(
"#,
&formats.branch_icmp,
)
.operands_in(vec![Cond, x, y, EBB, args])
.operands_in(vec![Cond, x, y, block, args])
.is_branch(true),
);
@@ -154,7 +154,7 @@ fn define_control_flow(
"#,
&formats.branch_int,
)
.operands_in(vec![Cond, f, EBB, args])
.operands_in(vec![Cond, f, block, args])
.is_branch(true),
);
}
@@ -172,7 +172,7 @@ fn define_control_flow(
"#,
&formats.branch_float,
)
.operands_in(vec![Cond, f, EBB, args])
.operands_in(vec![Cond, f, block, args])
.is_branch(true),
);
}
@@ -188,8 +188,8 @@ fn define_control_flow(
Indirect branch via jump table.
Use ``x`` as an unsigned index into the jump table ``JT``. If a jump
table entry is found, branch to the corresponding EBB. If no entry was
found or the index is out-of-bounds, branch to the given default EBB.
table entry is found, branch to the corresponding block. If no entry was
found or the index is out-of-bounds, branch to the given default block.
Note that this branch instruction can't pass arguments to the targeted
blocks. Split critical edges as needed to work around this.
@@ -202,7 +202,7 @@ fn define_control_flow(
"#,
&formats.branch_table,
)
.operands_in(vec![x, EBB, JT])
.operands_in(vec![x, block, JT])
.is_terminator(true)
.is_branch(true),
);
@@ -1407,7 +1407,7 @@ pub(crate) fn define(
satisfy instruction constraints.
The register diversions created by this instruction must be undone
before the value leaves the EBB. At the entry to a new EBB, all live
before the value leaves the block. At the entry to a new block, all live
values must be in their originally assigned registers.
"#,
&formats.reg_move,


@@ -197,9 +197,9 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
let al = var("al");
let ah = var("ah");
let cc = var("cc");
let ebb = var("ebb");
let ebb1 = var("ebb1");
let ebb2 = var("ebb2");
let block = var("block");
let block1 = var("block1");
let block2 = var("block2");
let ptr = var("ptr");
let flags = var("flags");
let offset = var("off");
@@ -269,7 +269,7 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
);
narrow.legalize(
def!(brz.I128(x, ebb, vararg)),
def!(brz.I128(x, block, vararg)),
vec![
def!((xl, xh) = isplit(x)),
def!(
@@ -287,18 +287,18 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
)
),
def!(c = band(a, b)),
def!(brnz(c, ebb, vararg)),
def!(brnz(c, block, vararg)),
],
);
narrow.legalize(
def!(brnz.I128(x, ebb1, vararg)),
def!(brnz.I128(x, block1, vararg)),
vec![
def!((xl, xh) = isplit(x)),
def!(brnz(xl, ebb1, vararg)),
def!(jump(ebb2, Literal::empty_vararg())),
ebb!(ebb2),
def!(brnz(xh, ebb1, vararg)),
def!(brnz(xl, block1, vararg)),
def!(jump(block2, Literal::empty_vararg())),
block!(block2),
def!(brnz(xh, block1, vararg)),
],
);
@@ -619,13 +619,13 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
for &ty in &[I8, I16] {
widen.legalize(
def!(brz.ty(x, ebb, vararg)),
vec![def!(a = uextend.I32(x)), def!(brz(a, ebb, vararg))],
def!(brz.ty(x, block, vararg)),
vec![def!(a = uextend.I32(x)), def!(brz(a, block, vararg))],
);
widen.legalize(
def!(brnz.ty(x, ebb, vararg)),
vec![def!(a = uextend.I32(x)), def!(brnz(a, ebb, vararg))],
def!(brnz.ty(x, block, vararg)),
vec![def!(a = uextend.I32(x)), def!(brnz(a, block, vararg))],
);
}


@@ -135,7 +135,7 @@ pub fn legalize_args<AA: ArgAssigner>(args: &[AbiParam], aa: &mut AA) -> Option<
///
/// The legalizer needs to repair the values at all ABI boundaries:
///
/// - Incoming function arguments to the entry EBB.
/// - Incoming function arguments to the entry block.
/// - Function arguments passed to a call.
/// - Return values from a call.
/// - Return values passed to a return instruction.


@@ -74,8 +74,8 @@ impl<'a> MemoryCodeSink<'a> {
/// A trait for receiving relocations for code that is emitted directly into memory.
pub trait RelocSink {
/// Add a relocation referencing an EBB at the current offset.
fn reloc_ebb(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
/// Add a relocation referencing a block at the current offset.
fn reloc_block(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
/// Add a relocation referencing an external symbol at the current offset.
fn reloc_external(&mut self, _: CodeOffset, _: Reloc, _: &ExternalName, _: Addend);
@@ -127,9 +127,9 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
self.write(x);
}
fn reloc_ebb(&mut self, rel: Reloc, ebb_offset: CodeOffset) {
fn reloc_block(&mut self, rel: Reloc, block_offset: CodeOffset) {
let ofs = self.offset();
self.relocs.reloc_ebb(ofs, rel, ebb_offset);
self.relocs.reloc_block(ofs, rel, block_offset);
}
fn reloc_external(&mut self, rel: Reloc, name: &ExternalName, addend: Addend) {
@@ -177,7 +177,7 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
pub struct NullRelocSink {}
impl RelocSink for NullRelocSink {
fn reloc_ebb(&mut self, _: u32, _: Reloc, _: u32) {}
fn reloc_block(&mut self, _: u32, _: Reloc, _: u32) {}
fn reloc_external(&mut self, _: u32, _: Reloc, _: &ExternalName, _: i64) {}
fn reloc_constant(&mut self, _: CodeOffset, _: Reloc, _: ConstantOffset) {}
fn reloc_jt(&mut self, _: u32, _: Reloc, _: JumpTable) {}


@@ -127,8 +127,8 @@ pub trait CodeSink {
/// Add 8 bytes to the code section.
fn put8(&mut self, _: u64);
/// Add a relocation referencing an EBB at the current offset.
fn reloc_ebb(&mut self, _: Reloc, _: CodeOffset);
/// Add a relocation referencing a block at the current offset.
fn reloc_block(&mut self, _: Reloc, _: CodeOffset);
/// Add a relocation referencing an external symbol plus the addend at the current offset.
fn reloc_external(&mut self, _: Reloc, _: &ExternalName, _: Addend);
@@ -205,10 +205,10 @@ where
EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS, &dyn TargetIsa),
{
let mut divert = RegDiversions::new();
for ebb in func.layout.ebbs() {
divert.at_ebb(&func.entry_diversions, ebb);
debug_assert_eq!(func.offsets[ebb], sink.offset());
for inst in func.layout.ebb_insts(ebb) {
for block in func.layout.blocks() {
divert.at_block(&func.entry_diversions, block);
debug_assert_eq!(func.offsets[block], sink.offset());
for inst in func.layout.block_insts(block) {
emit_inst(func, inst, &mut divert, sink, isa);
}
}
@@ -218,8 +218,8 @@ where
// Output jump tables.
for (jt, jt_data) in func.jump_tables.iter() {
let jt_offset = func.jt_offsets[jt];
for ebb in jt_data.iter() {
let rel_offset: i32 = func.offsets[*ebb] as i32 - jt_offset as i32;
for block in jt_data.iter() {
let rel_offset: i32 = func.offsets[*block] as i32 - jt_offset as i32;
sink.put4(rel_offset as u32)
}
}


@@ -1,9 +1,9 @@
//! Branch relaxation and offset computation.
//!
//! # EBB header offsets
//! # block header offsets
//!
//! Before we can generate binary machine code for branch instructions, we need to know the final
//! offsets of all the EBB headers in the function. This information is encoded in the
//! offsets of all the block headers in the function. This information is encoded in the
//! `func.offsets` table.
//!
//! # Branch relaxation
@@ -16,22 +16,22 @@
//! unconditional branches:
//!
//! ```clif
//! brz v1, ebb17
//! brz v1, block17
//! ```
//!
//! can be transformed into:
//!
//! ```clif
//! brnz v1, ebb23
//! jump ebb17
//! ebb23:
//! brnz v1, block23
//! jump block17
//! block23:
//! ```
use crate::binemit::{CodeInfo, CodeOffset};
use crate::cursor::{Cursor, FuncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueList};
use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueList};
use crate::isa::{EncInfo, TargetIsa};
use crate::iterators::IteratorExtras;
use crate::regalloc::RegDiversions;
@@ -40,7 +40,7 @@ use crate::CodegenResult;
use core::convert::TryFrom;
use log::debug;
/// Relax branches and compute the final layout of EBB headers in `func`.
/// Relax branches and compute the final layout of block headers in `func`.
///
/// Fill in the `func.offsets` table so the function is ready for binary emission.
pub fn relax_branches(
@@ -53,9 +53,9 @@ pub fn relax_branches(
let encinfo = isa.encoding_info();
// Clear all offsets so we can recognize EBBs that haven't been visited yet.
// Clear all offsets so we can recognize blocks that haven't been visited yet.
func.offsets.clear();
func.offsets.resize(func.dfg.num_ebbs());
func.offsets.resize(func.dfg.num_blocks());
// Start by removing redundant jumps.
fold_redundant_jumps(func, _cfg, _domtree);
@@ -66,12 +66,12 @@ pub fn relax_branches(
let mut offset = 0;
let mut divert = RegDiversions::new();
// First, compute initial offsets for every EBB.
// First, compute initial offsets for every block.
{
let mut cur = FuncCursor::new(func);
while let Some(ebb) = cur.next_ebb() {
divert.at_ebb(&cur.func.entry_diversions, ebb);
cur.func.offsets[ebb] = offset;
while let Some(block) = cur.next_block() {
divert.at_block(&cur.func.entry_diversions, block);
cur.func.offsets[block] = offset;
while let Some(inst) = cur.next_inst() {
divert.apply(&cur.func.dfg[inst]);
let enc = cur.func.encodings[inst];
@@ -88,12 +88,12 @@ pub fn relax_branches(
// Visit all instructions in layout order.
let mut cur = FuncCursor::new(func);
while let Some(ebb) = cur.next_ebb() {
divert.at_ebb(&cur.func.entry_diversions, ebb);
while let Some(block) = cur.next_block() {
divert.at_block(&cur.func.entry_diversions, block);
// Record the offset for `ebb` and make sure we iterate until offsets are stable.
if cur.func.offsets[ebb] != offset {
cur.func.offsets[ebb] = offset;
// Record the offset for `block` and make sure we iterate until offsets are stable.
if cur.func.offsets[block] != offset {
cur.func.offsets[block] = offset;
go_again = true;
}
@@ -153,21 +153,21 @@ pub fn relax_branches(
fn try_fold_redundant_jump(
func: &mut Function,
cfg: &mut ControlFlowGraph,
ebb: Ebb,
block: Block,
first_inst: Inst,
) -> bool {
let first_dest = match func.dfg[first_inst].branch_destination() {
Some(ebb) => ebb, // The instruction was a single-target branch.
Some(block) => block, // The instruction was a single-target branch.
None => {
return false; // The instruction was either multi-target or not a branch.
}
};
// For the moment, only attempt to fold a branch to an ebb that is parameterless.
// For the moment, only attempt to fold a branch to a block that is parameterless.
// These blocks are mainly produced by critical edge splitting.
//
// TODO: Allow folding blocks that define SSA values and function as phi nodes.
if func.dfg.num_ebb_params(first_dest) != 0 {
if func.dfg.num_block_params(first_dest) != 0 {
return false;
}
@@ -178,7 +178,7 @@ fn try_fold_redundant_jump(
return false;
}
// Now we need to fix up first_inst's ebb parameters to match second_inst's,
// Now we need to fix up first_inst's block parameters to match second_inst's,
// without changing the branch-specific arguments.
//
// The intermediary block is allowed to reference any SSA value that dominates it,
@@ -208,14 +208,14 @@ fn try_fold_redundant_jump(
// was a block parameter, rewrite it to refer to the value that the first jump
// passed in its parameters. Otherwise, make sure it dominates first_inst.
//
// For example: if we `ebb0: jump ebb1(v1)` to `ebb1(v2): jump ebb2(v2)`,
// we want to rewrite the original jump to `jump ebb2(v1)`.
let ebb_params: &[Value] = func.dfg.ebb_params(first_dest);
debug_assert!(ebb_params.len() == first_params.len());
// For example: if we `block0: jump block1(v1)` to `block1(v2): jump block2(v2)`,
// we want to rewrite the original jump to `jump block2(v1)`.
let block_params: &[Value] = func.dfg.block_params(first_dest);
debug_assert!(block_params.len() == first_params.len());
for value in second_params.iter_mut() {
if let Some((n, _)) = ebb_params.iter().enumerate().find(|(_, &p)| p == *value) {
// This value was the Nth parameter passed to the second_inst's ebb.
if let Some((n, _)) = block_params.iter().enumerate().find(|(_, &p)| p == *value) {
// This value was the Nth parameter passed to the second_inst's block.
// Rewrite it as the Nth parameter passed by first_inst.
*value = first_params[n];
}
@@ -233,21 +233,21 @@ fn try_fold_redundant_jump(
func.dfg[first_inst].put_value_list(value_list); // Put the new list.
// Bypass the second jump.
// This can disconnect the Ebb containing `second_inst`, to be cleaned up later.
// This can disconnect the Block containing `second_inst`, to be cleaned up later.
let second_dest = func.dfg[second_inst].branch_destination().expect("Dest");
func.change_branch_destination(first_inst, second_dest);
cfg.recompute_ebb(func, ebb);
cfg.recompute_block(func, block);
// The previously-intermediary Ebb may now be unreachable. Update CFG.
// The previously-intermediary Block may now be unreachable. Update CFG.
if cfg.pred_iter(first_dest).count() == 0 {
// Remove all instructions from that ebb.
// Remove all instructions from that block.
while let Some(inst) = func.layout.first_inst(first_dest) {
func.layout.remove_inst(inst);
}
// Remove the block...
cfg.recompute_ebb(func, first_dest); // ...from predecessor lists.
func.layout.remove_ebb(first_dest); // ...from the layout.
cfg.recompute_block(func, first_dest); // ...from predecessor lists.
func.layout.remove_block(first_dest); // ...from the layout.
}
true
@@ -264,14 +264,17 @@ fn fold_redundant_jumps(
// Postorder iteration guarantees that a chain of jumps is visited from
// the end of the chain to the start of the chain.
for &ebb in domtree.cfg_postorder() {
for &block in domtree.cfg_postorder() {
// Only proceed if the first terminator instruction is a single-target branch.
let first_inst = func.layout.last_inst(ebb).expect("Ebb has no terminator");
folded |= try_fold_redundant_jump(func, cfg, ebb, first_inst);
let first_inst = func
.layout
.last_inst(block)
.expect("Block has no terminator");
folded |= try_fold_redundant_jump(func, cfg, block, first_inst);
// Also try the previous instruction.
if let Some(prev_inst) = func.layout.prev_inst(first_inst) {
folded |= try_fold_redundant_jump(func, cfg, ebb, prev_inst);
folded |= try_fold_redundant_jump(func, cfg, block, prev_inst);
}
}
@@ -284,8 +287,11 @@ fn fold_redundant_jumps(
/// Convert `jump` instructions to `fallthrough` instructions where possible and verify that any
/// existing `fallthrough` instructions are correct.
fn fallthroughs(func: &mut Function) {
for (ebb, succ) in func.layout.ebbs().adjacent_pairs() {
let term = func.layout.last_inst(ebb).expect("EBB has no terminator.");
for (block, succ) in func.layout.blocks().adjacent_pairs() {
let term = func
.layout
.last_inst(block)
.expect("block has no terminator.");
if let InstructionData::Jump {
ref mut opcode,
destination,
@@ -296,10 +302,10 @@ fn fallthroughs(func: &mut Function) {
Opcode::Fallthrough => {
// Somebody used a fall-through instruction before the branch relaxation pass.
// Make sure it is correct, i.e. the destination is the layout successor.
debug_assert_eq!(destination, succ, "Illegal fall-through in {}", ebb)
debug_assert_eq!(destination, succ, "Illegal fall-through in {}", block)
}
Opcode::Jump => {
// If this is a jump to the successor EBB, change it to a fall-through.
// If this is a jump to the successor block, change it to a fall-through.
if destination == succ {
*opcode = Opcode::Fallthrough;
func.encodings[term] = Default::default();
@@ -368,18 +374,18 @@ fn relax_branch(
// branches, so one way of extending the range of a conditional branch is to invert its
// condition and make it branch over an unconditional jump which has the larger range.
//
// Splitting the EBB is problematic this late because there may be register diversions in
// Splitting the block is problematic this late because there may be register diversions in
// effect across the conditional branch, and they can't survive the control flow edge to a new
// EBB. We have two options for handling that:
// block. We have two options for handling that:
//
// 1. Set a flag on the new EBB that indicates it wants to preserve the register diversions of
// 1. Set a flag on the new block that indicates it wants to preserve the register diversions of
// its layout predecessor, or
// 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the EBB.
// 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the block.
//
// It seems that 1. would allow us to share code among RISC ISAs that need this.
//
// We can't allow register diversions to survive from the layout predecessor because the layout
// predecessor could contain kill points for some values that are live in this EBB, and
// predecessor could contain kill points for some values that are live in this block, and
// diversions are not automatically cancelled when the live range of a value ends.
// This assumes solution 2. above:


@@ -19,11 +19,11 @@ pub fn shrink_instructions(func: &mut Function, isa: &dyn TargetIsa) {
let encinfo = isa.encoding_info();
let mut divert = RegDiversions::new();
for ebb in func.layout.ebbs() {
for block in func.layout.blocks() {
// Load diversions from predecessors.
divert.at_ebb(&func.entry_diversions, ebb);
divert.at_block(&func.entry_diversions, block);
for inst in func.layout.ebb_insts(ebb) {
for inst in func.layout.block_insts(block) {
let enc = func.encodings[inst];
if enc.is_legal() {
// regmove/regfill/regspill are special instructions with register immediates


@@ -4,7 +4,7 @@ use alloc::vec::Vec;
use core::fmt::{Display, Formatter, Result, Write};
use crate::entity::SecondaryMap;
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::Function;
use crate::write::{FuncWriter, PlainWriter};
@@ -27,7 +27,7 @@ impl<'a> CFGPrinter<'a> {
/// Write the CFG for this function to `w`.
pub fn write(&self, w: &mut dyn Write) -> Result {
self.header(w)?;
self.ebb_nodes(w)?;
self.block_nodes(w)?;
self.cfg_connections(w)?;
writeln!(w, "}}")
}
@@ -40,7 +40,7 @@ impl<'a> CFGPrinter<'a> {
Ok(())
}
fn ebb_nodes(&self, w: &mut dyn Write) -> Result {
fn block_nodes(&self, w: &mut dyn Write) -> Result {
let mut aliases = SecondaryMap::<_, Vec<_>>::new();
for v in self.func.dfg.values() {
// VADFS returns the immediate target of an alias
@@ -49,11 +49,11 @@ impl<'a> CFGPrinter<'a> {
}
}
for ebb in &self.func.layout {
write!(w, " {} [shape=record, label=\"{{", ebb)?;
crate::write::write_ebb_header(w, self.func, None, ebb, 4)?;
for block in &self.func.layout {
write!(w, " {} [shape=record, label=\"{{", block)?;
crate::write::write_block_header(w, self.func, None, block, 4)?;
// Add all outgoing branch instructions to the label.
for inst in self.func.layout.ebb_insts(ebb) {
for inst in self.func.layout.block_insts(block) {
write!(w, " | <{}>", inst)?;
PlainWriter.write_instruction(w, self.func, &aliases, None, inst, 0)?;
}
@@ -63,9 +63,13 @@ impl<'a> CFGPrinter<'a> {
}
fn cfg_connections(&self, w: &mut dyn Write) -> Result {
for ebb in &self.func.layout {
for BasicBlock { ebb: parent, inst } in self.cfg.pred_iter(ebb) {
writeln!(w, " {}:{} -> {}", parent, inst, ebb)?;
for block in &self.func.layout {
for BlockPredecessor {
block: parent,
inst,
} in self.cfg.pred_iter(block)
{
writeln!(w, " {}:{} -> {}", parent, inst, block)?;
}
}
Ok(())


@@ -13,12 +13,12 @@ pub enum CursorPosition {
/// Cursor is pointing at an existing instruction.
/// New instructions will be inserted *before* the current instruction.
At(ir::Inst),
/// Cursor is before the beginning of an EBB. No instructions can be inserted. Calling
/// `next_inst()` will move to the first instruction in the EBB.
Before(ir::Ebb),
/// Cursor is pointing after the end of an EBB.
/// New instructions will be appended to the EBB.
After(ir::Ebb),
/// Cursor is before the beginning of a block. No instructions can be inserted. Calling
/// `next_inst()` will move to the first instruction in the block.
Before(ir::Block),
/// Cursor is pointing after the end of a block.
/// New instructions will be appended to the block.
After(ir::Block),
}
/// All cursor types implement the `Cursor` which provides common navigation operations.
@@ -46,7 +46,7 @@ pub trait Cursor {
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, SourceLoc};
/// # use cranelift_codegen::ir::{Function, Block, SourceLoc};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, srcloc: SourceLoc) {
/// let mut pos = FuncCursor::new(func).with_srcloc(srcloc);
@@ -76,7 +76,7 @@ pub trait Cursor {
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, inst: Inst) {
/// let mut pos = FuncCursor::new(func).at_inst(inst);
@@ -92,68 +92,68 @@ pub trait Cursor {
self
}
/// Rebuild this cursor positioned at the first insertion point for `ebb`.
/// Rebuild this cursor positioned at the first insertion point for `block`.
/// This differs from `at_first_inst` in that it doesn't assume that any
/// instructions have been inserted into `ebb` yet.
/// instructions have been inserted into `block` yet.
///
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, ebb: Ebb) {
/// let mut pos = FuncCursor::new(func).at_first_insertion_point(ebb);
/// fn edit_func(func: &mut Function, block: Block) {
/// let mut pos = FuncCursor::new(func).at_first_insertion_point(block);
///
/// // Use `pos`...
/// }
/// ```
fn at_first_insertion_point(mut self, ebb: ir::Ebb) -> Self
fn at_first_insertion_point(mut self, block: ir::Block) -> Self
where
Self: Sized,
{
self.goto_first_insertion_point(ebb);
self.goto_first_insertion_point(block);
self
}
/// Rebuild this cursor positioned at the first instruction in `ebb`.
/// Rebuild this cursor positioned at the first instruction in `block`.
///
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, ebb: Ebb) {
/// let mut pos = FuncCursor::new(func).at_first_inst(ebb);
/// fn edit_func(func: &mut Function, block: Block) {
/// let mut pos = FuncCursor::new(func).at_first_inst(block);
///
/// // Use `pos`...
/// }
/// ```
fn at_first_inst(mut self, ebb: ir::Ebb) -> Self
fn at_first_inst(mut self, block: ir::Block) -> Self
where
Self: Sized,
{
self.goto_first_inst(ebb);
self.goto_first_inst(block);
self
}
/// Rebuild this cursor positioned at the last instruction in `ebb`.
/// Rebuild this cursor positioned at the last instruction in `block`.
///
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, ebb: Ebb) {
/// let mut pos = FuncCursor::new(func).at_last_inst(ebb);
/// fn edit_func(func: &mut Function, block: Block) {
/// let mut pos = FuncCursor::new(func).at_last_inst(block);
///
/// // Use `pos`...
/// }
/// ```
fn at_last_inst(mut self, ebb: ir::Ebb) -> Self
fn at_last_inst(mut self, block: ir::Block) -> Self
where
Self: Sized,
{
self.goto_last_inst(ebb);
self.goto_last_inst(block);
self
}
@@ -162,7 +162,7 @@ pub trait Cursor {
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, inst: Inst) {
/// let mut pos = FuncCursor::new(func).after_inst(inst);
@@ -178,55 +178,55 @@ pub trait Cursor {
self
}
/// Rebuild this cursor positioned at the top of `ebb`.
/// Rebuild this cursor positioned at the top of `block`.
///
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, ebb: Ebb) {
/// let mut pos = FuncCursor::new(func).at_top(ebb);
/// fn edit_func(func: &mut Function, block: Block) {
/// let mut pos = FuncCursor::new(func).at_top(block);
///
/// // Use `pos`...
/// }
/// ```
fn at_top(mut self, ebb: ir::Ebb) -> Self
fn at_top(mut self, block: ir::Block) -> Self
where
Self: Sized,
{
self.goto_top(ebb);
self.goto_top(block);
self
}
/// Rebuild this cursor positioned at the bottom of `ebb`.
/// Rebuild this cursor positioned at the bottom of `block`.
///
/// This is intended to be used as a builder method:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb, Inst};
/// # use cranelift_codegen::ir::{Function, Block, Inst};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function, ebb: Ebb) {
/// let mut pos = FuncCursor::new(func).at_bottom(ebb);
/// fn edit_func(func: &mut Function, block: Block) {
/// let mut pos = FuncCursor::new(func).at_bottom(block);
///
/// // Use `pos`...
/// }
/// ```
fn at_bottom(mut self, ebb: ir::Ebb) -> Self
fn at_bottom(mut self, block: ir::Block) -> Self
where
Self: Sized,
{
self.goto_bottom(ebb);
self.goto_bottom(block);
self
}
/// Get the EBB corresponding to the current position.
fn current_ebb(&self) -> Option<ir::Ebb> {
/// Get the block corresponding to the current position.
fn current_block(&self) -> Option<ir::Block> {
use self::CursorPosition::*;
match self.position() {
Nowhere => None,
At(inst) => self.layout().inst_ebb(inst),
Before(ebb) | After(ebb) => Some(ebb),
At(inst) => self.layout().inst_block(inst),
Before(block) | After(block) => Some(block),
}
}
@@ -242,13 +242,13 @@ pub trait Cursor {
/// Go to the position after a specific instruction, which must be inserted
/// in the layout. New instructions will be inserted after `inst`.
fn goto_after_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_ebb(inst).is_some());
debug_assert!(self.layout().inst_block(inst).is_some());
let new_pos = if let Some(next) = self.layout().next_inst(inst) {
CursorPosition::At(next)
} else {
CursorPosition::After(
self.layout()
.inst_ebb(inst)
.inst_block(inst)
.expect("current instruction removed?"),
)
};
@@ -258,133 +258,133 @@ pub trait Cursor {
/// Go to a specific instruction which must be inserted in the layout.
/// New instructions will be inserted before `inst`.
fn goto_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_ebb(inst).is_some());
debug_assert!(self.layout().inst_block(inst).is_some());
self.set_position(CursorPosition::At(inst));
}
/// Go to the position for inserting instructions at the beginning of `ebb`,
/// Go to the position for inserting instructions at the beginning of `block`,
/// which unlike `goto_first_inst` doesn't assume that any instructions have
/// been inserted into `ebb` yet.
fn goto_first_insertion_point(&mut self, ebb: ir::Ebb) {
if let Some(inst) = self.layout().first_inst(ebb) {
/// been inserted into `block` yet.
fn goto_first_insertion_point(&mut self, block: ir::Block) {
if let Some(inst) = self.layout().first_inst(block) {
self.goto_inst(inst);
} else {
self.goto_bottom(ebb);
self.goto_bottom(block);
}
}
/// Go to the first instruction in `ebb`.
fn goto_first_inst(&mut self, ebb: ir::Ebb) {
let inst = self.layout().first_inst(ebb).expect("Empty EBB");
/// Go to the first instruction in `block`.
fn goto_first_inst(&mut self, block: ir::Block) {
let inst = self.layout().first_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the last instruction in `ebb`.
fn goto_last_inst(&mut self, ebb: ir::Ebb) {
let inst = self.layout().last_inst(ebb).expect("Empty EBB");
/// Go to the last instruction in `block`.
fn goto_last_inst(&mut self, block: ir::Block) {
let inst = self.layout().last_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the top of `ebb` which must be inserted into the layout.
/// Go to the top of `block` which must be inserted into the layout.
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
/// instruction in `ebb`.
fn goto_top(&mut self, ebb: ir::Ebb) {
debug_assert!(self.layout().is_ebb_inserted(ebb));
self.set_position(CursorPosition::Before(ebb));
/// instruction in `block`.
fn goto_top(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::Before(block));
}
/// Go to the bottom of `ebb` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `ebb`.
fn goto_bottom(&mut self, ebb: ir::Ebb) {
debug_assert!(self.layout().is_ebb_inserted(ebb));
self.set_position(CursorPosition::After(ebb));
/// Go to the bottom of `block` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `block`.
fn goto_bottom(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::After(block));
}
/// Go to the top of the next EBB in layout order and return it.
/// Go to the top of the next block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the top of the first EBB in the
/// - If the cursor wasn't pointing at anything, go to the top of the first block in the
/// function.
/// - If there are no more EBBs, leave the cursor pointing at nothing and return `None`.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `next_ebb()` method is intended for iterating over the EBBs in layout order:
/// The `next_block()` method is intended for iterating over the blocks in layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb};
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(ebb) = cursor.next_ebb() {
/// // Edit ebb.
/// while let Some(block) = cursor.next_block() {
/// // Edit block.
/// }
/// }
/// ```
fn next_ebb(&mut self) -> Option<ir::Ebb> {
let next = if let Some(ebb) = self.current_ebb() {
self.layout().next_ebb(ebb)
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(ebb) => CursorPosition::Before(ebb),
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
/// Go to the bottom of the previous EBB in layout order and return it.
/// Go to the bottom of the previous block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the bottom of the last EBB in the
/// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
/// function.
/// - If there are no more EBBs, leave the cursor pointing at nothing and return `None`.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `prev_ebb()` method is intended for iterating over the EBBs in backwards layout order:
/// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb};
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(ebb) = cursor.prev_ebb() {
/// // Edit ebb.
/// while let Some(block) = cursor.prev_block() {
/// // Edit block.
/// }
/// }
/// ```
fn prev_ebb(&mut self) -> Option<ir::Ebb> {
let prev = if let Some(ebb) = self.current_ebb() {
self.layout().prev_ebb(ebb)
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_ebb()
self.layout().last_block()
};
self.set_position(match prev {
Some(ebb) => CursorPosition::After(ebb),
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
/// Move to the next instruction in the same EBB and return it.
/// Move to the next instruction in the same block and return it.
///
/// - If the cursor was positioned before an EBB, go to the first instruction in that EBB.
/// - If there are no more instructions in the EBB, go to the `After(ebb)` position and return
/// - If the cursor was positioned before a block, go to the first instruction in that block.
/// - If there are no more instructions in the block, go to the `After(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different EBB.
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `next_inst()` method is intended for iterating over the instructions in an EBB like
/// The `next_inst()` method is intended for iterating over the instructions in a block like
/// this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb};
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_ebb(func: &mut Function, ebb: Ebb) {
/// let mut cursor = FuncCursor::new(func).at_top(ebb);
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_top(block);
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
@@ -395,11 +395,11 @@ pub trait Cursor {
/// Iterating over all the instructions in a function looks like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb};
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(ebb) = cursor.next_ebb() {
/// while let Some(block) = cursor.next_block() {
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
@@ -417,44 +417,44 @@ pub trait Cursor {
} else {
let pos = After(
self.layout()
.inst_ebb(inst)
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
Before(ebb) => {
if let Some(next) = self.layout().first_inst(ebb) {
Before(block) => {
if let Some(next) = self.layout().first_inst(block) {
self.set_position(At(next));
Some(next)
} else {
self.set_position(After(ebb));
self.set_position(After(block));
None
}
}
}
}
/// Move to the previous instruction in the same EBB and return it.
/// Move to the previous instruction in the same block and return it.
///
/// - If the cursor was positioned after an EBB, go to the last instruction in that EBB.
/// - If there are no more instructions in the EBB, go to the `Before(ebb)` position and return
/// - If the cursor was positioned after a block, go to the last instruction in that block.
/// - If there are no more instructions in the block, go to the `Before(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different EBB.
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `prev_inst()` method is intended for iterating backwards over the instructions in an
/// EBB like this:
/// block like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Ebb};
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_ebb(func: &mut Function, ebb: Ebb) {
/// let mut cursor = FuncCursor::new(func).at_bottom(ebb);
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_bottom(block);
/// while let Some(inst) = cursor.prev_inst() {
/// // Edit instructions...
/// }
@@ -471,19 +471,19 @@ pub trait Cursor {
} else {
let pos = Before(
self.layout()
.inst_ebb(inst)
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
After(ebb) => {
if let Some(prev) = self.layout().last_inst(ebb) {
After(block) => {
if let Some(prev) = self.layout().last_inst(block) {
self.set_position(At(prev));
Some(prev)
} else {
self.set_position(Before(ebb));
self.set_position(Before(block));
None
}
}
@@ -494,17 +494,17 @@ pub trait Cursor {
///
/// - If pointing at an instruction, the new instruction is inserted before the current
/// instruction.
/// - If pointing at the bottom of an EBB, the new instruction is appended to the EBB.
/// - If pointing at the bottom of a block, the new instruction is appended to the block.
/// - Otherwise panic.
///
/// In either case, the cursor is not moved, such that repeated calls to `insert_inst()` causes
/// instructions to appear in insertion order in the EBB.
/// instructions to appear in insertion order in the block.
fn insert_inst(&mut self, inst: ir::Inst) {
use self::CursorPosition::*;
match self.position() {
Nowhere | Before(..) => panic!("Invalid insert_inst position"),
At(cur) => self.layout_mut().insert_inst(inst, cur),
After(ebb) => self.layout_mut().append_inst(inst, ebb),
After(block) => self.layout_mut().append_inst(inst, block),
}
}
@@ -532,34 +532,34 @@ pub trait Cursor {
inst
}
/// Insert an EBB at the current position and switch to it.
/// Insert a block at the current position and switch to it.
///
/// As far as possible, this method behaves as if the EBB header were an instruction inserted
/// As far as possible, this method behaves as if the block header were an instruction inserted
/// at the current position.
///
/// - If the cursor is pointing at an existing instruction, *the current EBB is split in two*
/// and the current instruction becomes the first instruction in the inserted EBB.
/// - If the cursor points at the bottom of an EBB, the new EBB is inserted after the current
/// one, and moved to the bottom of the new EBB where instructions can be appended.
/// - If the cursor points to the top of an EBB, the new EBB is inserted above the current one.
/// - If the cursor is not pointing at anything, the new EBB is placed last in the layout.
/// - If the cursor is pointing at an existing instruction, *the current block is split in two*
/// and the current instruction becomes the first instruction in the inserted block.
/// - If the cursor points at the bottom of a block, the new block is inserted after the current
/// one, and moved to the bottom of the new block where instructions can be appended.
/// - If the cursor points to the top of a block, the new block is inserted above the current one.
/// - If the cursor is not pointing at anything, the new block is placed last in the layout.
///
/// This means that it is always valid to call this method, and it always leaves the cursor in
/// a state that will insert instructions into the new EBB.
fn insert_ebb(&mut self, new_ebb: ir::Ebb) {
/// a state that will insert instructions into the new block.
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_ebb(new_ebb, inst);
// All other cases move to `After(ebb)`, but in this case we'll stay `At(inst)`.
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_ebb(new_ebb),
Before(ebb) => self.layout_mut().insert_ebb(new_ebb, ebb),
After(ebb) => self.layout_mut().insert_ebb_after(new_ebb, ebb),
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new EBB.
self.set_position(After(new_ebb));
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
}


@@ -46,8 +46,8 @@ pub fn do_dce(func: &mut Function, domtree: &mut DominatorTree) {
debug_assert!(domtree.is_valid());
let mut live = vec![false; func.dfg.num_values()];
for &ebb in domtree.cfg_postorder() {
let mut pos = FuncCursor::new(func).at_bottom(ebb);
for &block in domtree.cfg_postorder() {
let mut pos = FuncCursor::new(func).at_bottom(block);
while let Some(inst) = pos.prev_inst() {
{
let data = &pos.func.dfg[inst];


@@ -1,9 +1,9 @@
//! A Dominator Tree represented as mappings of Ebbs to their immediate dominator.
//! A Dominator Tree represented as mappings of Blocks to their immediate dominator.
use crate::entity::SecondaryMap;
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::instructions::BranchInfo;
use crate::ir::{Ebb, ExpandedProgramPoint, Function, Inst, Layout, ProgramOrder, Value};
use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Layout, ProgramOrder, Value};
use crate::packed_option::PackedOption;
use crate::timing;
use alloc::vec::Vec;
@@ -19,7 +19,7 @@ const STRIDE: u32 = 4;
const DONE: u32 = 1;
const SEEN: u32 = 2;
/// Dominator tree node. We keep one of these per EBB.
/// Dominator tree node. We keep one of these per block.
#[derive(Clone, Default)]
struct DomNode {
/// Number of this node in a reverse post-order traversal of the CFG, starting from 1.
@@ -28,7 +28,7 @@ struct DomNode {
/// Unreachable nodes get number 0, all others are positive.
rpo_number: u32,
/// The immediate dominator of this EBB, represented as the branch or jump instruction at the
/// The immediate dominator of this block, represented as the branch or jump instruction at the
/// end of the dominating basic block.
///
/// This is `None` for unreachable blocks and the entry block which doesn't have an immediate
@@ -38,53 +38,53 @@ struct DomNode {
/// The dominator tree for a single function.
pub struct DominatorTree {
nodes: SecondaryMap<Ebb, DomNode>,
nodes: SecondaryMap<Block, DomNode>,
/// CFG post-order of all reachable EBBs.
postorder: Vec<Ebb>,
/// CFG post-order of all reachable blocks.
postorder: Vec<Block>,
/// Scratch memory used by `compute_postorder()`.
stack: Vec<Ebb>,
stack: Vec<Block>,
valid: bool,
}
/// Methods for querying the dominator tree.
impl DominatorTree {
/// Is `ebb` reachable from the entry block?
pub fn is_reachable(&self, ebb: Ebb) -> bool {
self.nodes[ebb].rpo_number != 0
/// Is `block` reachable from the entry block?
pub fn is_reachable(&self, block: Block) -> bool {
self.nodes[block].rpo_number != 0
}
/// Get the CFG post-order of EBBs that was used to compute the dominator tree.
/// Get the CFG post-order of blocks that was used to compute the dominator tree.
///
/// Note that this post-order is not updated automatically when the CFG is modified. It is
/// computed from scratch and cached by `compute()`.
pub fn cfg_postorder(&self) -> &[Ebb] {
pub fn cfg_postorder(&self) -> &[Block] {
debug_assert!(self.is_valid());
&self.postorder
}
/// Returns the immediate dominator of `ebb`.
/// Returns the immediate dominator of `block`.
///
/// The immediate dominator of an extended basic block is a basic block which we represent by
/// The immediate dominator of a basic block is a basic block which we represent by
/// the branch or jump instruction at the end of the basic block. This does not have to be the
/// terminator of its EBB.
/// terminator of its block.
///
/// A branch or jump is said to *dominate* `ebb` if all control flow paths from the function
/// entry to `ebb` must go through the branch.
/// A branch or jump is said to *dominate* `block` if all control flow paths from the function
/// entry to `block` must go through the branch.
///
/// The *immediate dominator* is the dominator that is closest to `ebb`. All other dominators
/// The *immediate dominator* is the dominator that is closest to `block`. All other dominators
/// also dominate the immediate dominator.
///
/// This returns `None` if `ebb` is not reachable from the entry EBB, or if it is the entry EBB
/// This returns `None` if `block` is not reachable from the entry block, or if it is the entry block
/// which has no dominators.
pub fn idom(&self, ebb: Ebb) -> Option<Inst> {
self.nodes[ebb].idom.into()
pub fn idom(&self, block: Block) -> Option<Inst> {
self.nodes[block].idom.into()
}
/// Compare two EBBs relative to the reverse post-order.
fn rpo_cmp_ebb(&self, a: Ebb, b: Ebb) -> Ordering {
/// Compare two blocks relative to the reverse post-order.
fn rpo_cmp_block(&self, a: Block, b: Block) -> Ordering {
self.nodes[a].rpo_number.cmp(&self.nodes[b].rpo_number)
}
@@ -93,7 +93,7 @@ impl DominatorTree {
///
/// Return `Ordering::Less` if `a` comes before `b` in the RPO.
///
/// If `a` and `b` belong to the same EBB, compare their relative position in the EBB.
/// If `a` and `b` belong to the same block, compare their relative position in the block.
pub fn rpo_cmp<A, B>(&self, a: A, b: B, layout: &Layout) -> Ordering
where
A: Into<ExpandedProgramPoint>,
@@ -101,7 +101,7 @@ impl DominatorTree {
{
let a = a.into();
let b = b.into();
self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
self.rpo_cmp_block(layout.pp_block(a), layout.pp_block(b))
.then(layout.cmp(a, b))
}
@@ -110,7 +110,7 @@ impl DominatorTree {
/// This means that every control-flow path from the function entry to `b` must go through `a`.
///
/// Dominance is ill defined for unreachable blocks. This function can always determine
/// dominance for instructions in the same EBB, but otherwise returns `false` if either block
/// dominance for instructions in the same block, but otherwise returns `false` if either block
/// is unreachable.
///
/// An instruction is considered to dominate itself.
@@ -122,12 +122,14 @@ impl DominatorTree {
let a = a.into();
let b = b.into();
match a {
ExpandedProgramPoint::Ebb(ebb_a) => {
a == b || self.last_dominator(ebb_a, b, layout).is_some()
ExpandedProgramPoint::Block(block_a) => {
a == b || self.last_dominator(block_a, b, layout).is_some()
}
ExpandedProgramPoint::Inst(inst_a) => {
let ebb_a = layout.inst_ebb(inst_a).expect("Instruction not in layout.");
match self.last_dominator(ebb_a, b, layout) {
let block_a = layout
.inst_block(inst_a)
.expect("Instruction not in layout.");
match self.last_dominator(block_a, b, layout) {
Some(last) => layout.cmp(inst_a, last) != Ordering::Greater,
None => false,
}
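// Editorial sketch (assumed name): a typical client-side guard built on
// `dominates`: a rewrite may only use the result of `def` at `user` if the
// definition dominates the use.
fn def_dominates_use(dt: &DominatorTree, layout: &Layout, def: Inst, user: Inst) -> bool {
    dt.dominates(def, user, layout)
}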
@@ -137,14 +139,14 @@ impl DominatorTree {
/// Find the last instruction in `a` that dominates `b`.
/// If no instructions in `a` dominate `b`, return `None`.
pub fn last_dominator<B>(&self, a: Ebb, b: B, layout: &Layout) -> Option<Inst>
pub fn last_dominator<B>(&self, a: Block, b: B, layout: &Layout) -> Option<Inst>
where
B: Into<ExpandedProgramPoint>,
{
let (mut ebb_b, mut inst_b) = match b.into() {
ExpandedProgramPoint::Ebb(ebb) => (ebb, None),
let (mut block_b, mut inst_b) = match b.into() {
ExpandedProgramPoint::Block(block) => (block, None),
ExpandedProgramPoint::Inst(inst) => (
layout.inst_ebb(inst).expect("Instruction not in layout."),
layout.inst_block(inst).expect("Instruction not in layout."),
Some(inst),
),
};
@@ -152,15 +154,15 @@ impl DominatorTree {
// Run a finger up the dominator tree from b until we see a.
// Do nothing if b is unreachable.
while rpo_a < self.nodes[ebb_b].rpo_number {
let idom = match self.idom(ebb_b) {
while rpo_a < self.nodes[block_b].rpo_number {
let idom = match self.idom(block_b) {
Some(idom) => idom,
None => return None, // a is unreachable, so we climbed past the entry
};
ebb_b = layout.inst_ebb(idom).expect("Dominator got removed.");
block_b = layout.inst_block(idom).expect("Dominator got removed.");
inst_b = Some(idom);
}
if a == ebb_b {
if a == block_b {
inst_b
} else {
None
@@ -172,25 +174,25 @@ impl DominatorTree {
/// Both basic blocks are assumed to be reachable.
pub fn common_dominator(
&self,
mut a: BasicBlock,
mut b: BasicBlock,
mut a: BlockPredecessor,
mut b: BlockPredecessor,
layout: &Layout,
) -> BasicBlock {
) -> BlockPredecessor {
loop {
match self.rpo_cmp_ebb(a.ebb, b.ebb) {
match self.rpo_cmp_block(a.block, b.block) {
Ordering::Less => {
// `a` comes before `b` in the RPO. Move `b` up.
let idom = self.nodes[b.ebb].idom.expect("Unreachable basic block?");
b = BasicBlock::new(
layout.inst_ebb(idom).expect("Dangling idom instruction"),
let idom = self.nodes[b.block].idom.expect("Unreachable basic block?");
b = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Greater => {
// `b` comes before `a` in the RPO. Move `a` up.
let idom = self.nodes[a.ebb].idom.expect("Unreachable basic block?");
a = BasicBlock::new(
layout.inst_ebb(idom).expect("Dangling idom instruction"),
let idom = self.nodes[a.block].idom.expect("Unreachable basic block?");
a = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
@@ -199,11 +201,11 @@ impl DominatorTree {
}
debug_assert_eq!(
a.ebb, b.ebb,
a.block, b.block,
"Unreachable block passed to common_dominator?"
);
// We're in the same EBB. The common dominator is the earlier instruction.
// We're in the same block. The common dominator is the earlier instruction.
if layout.cmp(a.inst, b.inst) == Ordering::Less {
a
} else {
@@ -226,10 +228,10 @@ impl DominatorTree {
/// Allocate and compute a dominator tree.
pub fn with_function(func: &Function, cfg: &ControlFlowGraph) -> Self {
let ebb_capacity = func.layout.ebb_capacity();
let block_capacity = func.layout.block_capacity();
let mut domtree = Self {
nodes: SecondaryMap::with_capacity(ebb_capacity),
postorder: Vec::with_capacity(ebb_capacity),
nodes: SecondaryMap::with_capacity(block_capacity),
postorder: Vec::with_capacity(block_capacity),
stack: Vec::new(),
valid: false,
};
@@ -266,13 +268,13 @@ impl DominatorTree {
/// Reset all internal data structures and compute a post-order of the control flow graph.
///
/// This leaves `rpo_number == 1` for all reachable EBBs, 0 for unreachable ones.
/// This leaves `rpo_number == 1` for all reachable blocks, 0 for unreachable ones.
fn compute_postorder(&mut self, func: &Function) {
self.clear();
self.nodes.resize(func.dfg.num_ebbs());
self.nodes.resize(func.dfg.num_blocks());
// This algorithm is a depth first traversal (DFT) of the control flow graph, computing a
// post-order of the EBBs that are reachable form the entry block. A DFT post-order is not
// post-order of the blocks that are reachable from the entry block. A DFT post-order is not
// unique. The specific order we get is controlled by two factors:
//
// 1. The order each node's children are visited, and
@@ -280,76 +282,76 @@ impl DominatorTree {
//
// There are two ways of viewing the CFG as a graph:
//
// 1. Each EBB is a node, with outgoing edges for all the branches in the EBB.
// 1. Each block is a node, with outgoing edges for all the branches in the block.
// 2. Each basic block is a node, with outgoing edges for the single branch at the end of
// the BB. (An EBB is a linear sequence of basic blocks).
// the BB. (A block is a linear sequence of basic blocks).
//
// The first graph is a contraction of the second one. We want to compute an EBB post-order
// The first graph is a contraction of the second one. We want to compute a block post-order
// that is compatible with both graph interpretations. That is, if you compute a BB post-order
// and then remove those BBs that do not correspond to EBB headers, you get a post-order of
// the EBB graph.
// and then remove those BBs that do not correspond to block headers, you get a post-order of
// the block graph.
//
// Node child order:
//
// In the BB graph, we always go down the fall-through path first and follow the branch
// destination second.
//
// In the EBB graph, this is equivalent to visiting EBB successors in a bottom-up
// order, starting from the destination of the EBB's terminating jump, ending at the
// destination of the first branch in the EBB.
// In the block graph, this is equivalent to visiting block successors in a bottom-up
// order, starting from the destination of the block's terminating jump, ending at the
// destination of the first branch in the block.
//
// Edge pruning:
//
// In the BB graph, we keep an edge to an EBB the first time we visit the *source* side
// of the edge. Any subsequent edges to the same EBB are pruned.
// In the BB graph, we keep an edge to a block the first time we visit the *source* side
// of the edge. Any subsequent edges to the same block are pruned.
//
// The equivalent tree is reached in the EBB graph by keeping the first edge to an EBB
// The equivalent tree is reached in the block graph by keeping the first edge to a block
// in a top-down traversal of the successors. (And then visiting edges in a bottom-up
// order).
//
// This pruning method makes it possible to compute the DFT without storing lots of
// information about the progress through an EBB.
// information about the progress through a block.
// During this algorithm only, use `rpo_number` to hold the following state:
//
// 0: EBB has not yet been reached in the pre-order.
// SEEN: EBB has been pushed on the stack but successors not yet pushed.
// 0: block has not yet been reached in the pre-order.
// SEEN: block has been pushed on the stack but successors not yet pushed.
// DONE: Successors pushed.
match func.layout.entry_block() {
Some(ebb) => {
self.stack.push(ebb);
self.nodes[ebb].rpo_number = SEEN;
Some(block) => {
self.stack.push(block);
self.nodes[block].rpo_number = SEEN;
}
None => return,
}
while let Some(ebb) = self.stack.pop() {
match self.nodes[ebb].rpo_number {
while let Some(block) = self.stack.pop() {
match self.nodes[block].rpo_number {
SEEN => {
// This is the first time we pop the EBB, so we need to scan its successors and
// This is the first time we pop the block, so we need to scan its successors and
// then revisit it.
self.nodes[ebb].rpo_number = DONE;
self.stack.push(ebb);
self.push_successors(func, ebb);
self.nodes[block].rpo_number = DONE;
self.stack.push(block);
self.push_successors(func, block);
}
DONE => {
// This is the second time we pop the EBB, so all successors have been
// This is the second time we pop the block, so all successors have been
// processed.
self.postorder.push(ebb);
self.postorder.push(block);
}
_ => unreachable!(),
}
}
}
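// Editorial sketch: the SEEN/DONE trick above, distilled to a self-contained
// post-order over a plain adjacency list (node 0 is the entry; all names here
// are illustrative, not part of this change).
fn postorder_sketch(succs: &[Vec<usize>]) -> Vec<usize> {
    const UNSEEN: u8 = 0;
    const SEEN: u8 = 1;
    const DONE: u8 = 2;
    let mut state = vec![UNSEEN; succs.len()];
    let mut stack = vec![0];
    let mut order = Vec::new();
    state[0] = SEEN;
    while let Some(n) = stack.pop() {
        if state[n] == SEEN {
            // First pop: re-push the node, then push its unseen successors.
            state[n] = DONE;
            stack.push(n);
            for &s in &succs[n] {
                if state[s] == UNSEEN {
                    state[s] = SEEN;
                    stack.push(s);
                }
            }
        } else {
            // Second pop: all successors are done, emit in post-order.
            order.push(n);
        }
    }
    order
}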
/// Push `ebb` successors onto `self.stack`, filtering out those that have already been seen.
/// Push `block` successors onto `self.stack`, filtering out those that have already been seen.
///
/// The successors are pushed in program order which is important to get a split-invariant
/// post-order. Split-invariant means that if an EBB is split in two, we get the same
/// post-order except for the insertion of the new EBB header at the split point.
fn push_successors(&mut self, func: &Function, ebb: Ebb) {
for inst in func.layout.ebb_insts(ebb) {
/// post-order. Split-invariant means that if a block is split in two, we get the same
/// post-order except for the insertion of the new block header at the split point.
fn push_successors(&mut self, func: &Function, block: Block) {
for inst in func.layout.block_insts(block) {
match func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(succ, _) => self.push_if_unseen(succ),
BranchInfo::Table(jt, dest) => {
@@ -365,11 +367,11 @@ impl DominatorTree {
}
}
/// Push `ebb` onto `self.stack` if it has not already been seen.
fn push_if_unseen(&mut self, ebb: Ebb) {
if self.nodes[ebb].rpo_number == 0 {
self.nodes[ebb].rpo_number = SEEN;
self.stack.push(ebb);
/// Push `block` onto `self.stack` if it has not already been seen.
fn push_if_unseen(&mut self, block: Block) {
if self.nodes[block].rpo_number == 0 {
self.nodes[block].rpo_number = SEEN;
self.stack.push(block);
}
}
@@ -378,10 +380,10 @@ impl DominatorTree {
fn compute_domtree(&mut self, func: &Function, cfg: &ControlFlowGraph) {
// During this algorithm, `rpo_number` has the following values:
//
// 0: EBB is not reachable.
// 1: EBB is reachable, but has not yet been visited during the first pass. This is set by
// 0: block is not reachable.
// 1: block is reachable, but has not yet been visited during the first pass. This is set by
// `compute_postorder`.
// 2+: EBB is reachable and has an assigned RPO number.
// 2+: block is reachable and has an assigned RPO number.
// We'll be iterating over a reverse post-order of the CFG, skipping the entry block.
let (entry_block, postorder) = match self.postorder.as_slice().split_last() {
@@ -392,7 +394,7 @@ impl DominatorTree {
// Do a first pass where we assign RPO numbers to all reachable nodes.
self.nodes[entry_block].rpo_number = 2 * STRIDE;
for (rpo_idx, &ebb) in postorder.iter().rev().enumerate() {
for (rpo_idx, &block) in postorder.iter().rev().enumerate() {
// Update the current node and give it an RPO number.
// The entry block got 2, the rest start at 3 by multiples of STRIDE to leave
// room for future dominator tree modifications.
@@ -402,8 +404,8 @@ impl DominatorTree {
//
// Due to the nature of the post-order traversal, every node we visit will have at
// least one predecessor that has previously been visited during this RPO.
self.nodes[ebb] = DomNode {
idom: self.compute_idom(ebb, cfg, &func.layout).into(),
self.nodes[block] = DomNode {
idom: self.compute_idom(block, cfg, &func.layout).into(),
rpo_number: (rpo_idx as u32 + 3) * STRIDE,
}
}
@@ -415,30 +417,30 @@ impl DominatorTree {
let mut changed = true;
while changed {
changed = false;
for &ebb in postorder.iter().rev() {
let idom = self.compute_idom(ebb, cfg, &func.layout).into();
if self.nodes[ebb].idom != idom {
self.nodes[ebb].idom = idom;
for &block in postorder.iter().rev() {
let idom = self.compute_idom(block, cfg, &func.layout).into();
if self.nodes[block].idom != idom {
self.nodes[block].idom = idom;
changed = true;
}
}
}
}
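// Editorial worked example of the numbering scheme above (the concrete STRIDE
// value is an assumption, purely for illustration): with STRIDE = 4 the entry
// block gets 2 * STRIDE = 8 and the remaining blocks get (rpo_idx + 3) * STRIDE,
// i.e. 12, 16, 20, ... The unused numbers between neighbours are what lets a
// later edit slot a new block into the order without renumbering everything.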
// Compute the immediate dominator for `ebb` using the current `idom` states for the reachable
// Compute the immediate dominator for `block` using the current `idom` states for the reachable
// nodes.
fn compute_idom(&self, ebb: Ebb, cfg: &ControlFlowGraph, layout: &Layout) -> Inst {
// Get an iterator with just the reachable, already visited predecessors to `ebb`.
fn compute_idom(&self, block: Block, cfg: &ControlFlowGraph, layout: &Layout) -> Inst {
// Get an iterator with just the reachable, already visited predecessors to `block`.
// Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't
// been visited yet, 0 for unreachable blocks.
let mut reachable_preds = cfg
.pred_iter(ebb)
.filter(|&BasicBlock { ebb: pred, .. }| self.nodes[pred].rpo_number > 1);
.pred_iter(block)
.filter(|&BlockPredecessor { block: pred, .. }| self.nodes[pred].rpo_number > 1);
// The RPO must visit at least one predecessor before this node.
let mut idom = reachable_preds
.next()
.expect("EBB node must have one reachable predecessor");
.expect("block node must have one reachable predecessor");
for pred in reachable_preds {
idom = self.common_dominator(idom, pred, layout);
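// Editorial note: this fold is the usual "intersect over the already-visited
// predecessors" step of iterative dominator construction; after the loop,
// `idom` is a common dominator of every reachable predecessor seen so far.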
@@ -453,25 +455,25 @@ impl DominatorTree {
/// This data structure is computed from a `DominatorTree` and provides:
///
/// - A forward traversable dominator tree through the `children()` iterator.
/// - An ordering of EBBs according to a dominator tree pre-order.
/// - Constant time dominance checks at the EBB granularity.
/// - An ordering of blocks according to a dominator tree pre-order.
/// - Constant time dominance checks at the block granularity.
///
/// The information in this auxiliary data structure is not easy to update when the control flow
/// graph changes, which is why it is kept separate.
pub struct DominatorTreePreorder {
nodes: SecondaryMap<Ebb, ExtraNode>,
nodes: SecondaryMap<Block, ExtraNode>,
// Scratch memory used by `compute_postorder()`.
stack: Vec<Ebb>,
stack: Vec<Block>,
}
#[derive(Default, Clone)]
struct ExtraNode {
/// First child node in the domtree.
child: PackedOption<Ebb>,
child: PackedOption<Block>,
/// Next sibling node in the domtree. This linked list is ordered according to the CFG RPO.
sibling: PackedOption<Ebb>,
sibling: PackedOption<Block>,
/// Sequence number for this node in a pre-order traversal of the dominator tree.
/// Unreachable blocks have number 0, the entry block is 1.
@@ -501,23 +503,23 @@ impl DominatorTreePreorder {
//
// By following the CFG post-order and pushing to the front of the lists, we make sure that
// sibling lists are ordered according to the CFG reverse post-order.
for &ebb in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(ebb) {
let idom = layout.pp_ebb(idom_inst);
let sib = mem::replace(&mut self.nodes[idom].child, ebb.into());
self.nodes[ebb].sibling = sib;
for &block in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(block) {
let idom = layout.pp_block(idom_inst);
let sib = mem::replace(&mut self.nodes[idom].child, block.into());
self.nodes[block].sibling = sib;
} else {
// The only EBB without an immediate dominator is the entry.
self.stack.push(ebb);
// The only block without an immediate dominator is the entry.
self.stack.push(block);
}
}
// Step 2. Assign pre-order numbers from a DFS of the dominator tree.
debug_assert!(self.stack.len() <= 1);
let mut n = 0;
while let Some(ebb) = self.stack.pop() {
while let Some(block) = self.stack.pop() {
n += 1;
let node = &mut self.nodes[ebb];
let node = &mut self.nodes[block];
node.pre_number = n;
node.pre_max = n;
if let Some(n) = node.sibling.expand() {
@@ -531,29 +533,29 @@ impl DominatorTreePreorder {
// Step 3. Propagate the `pre_max` numbers up the tree.
// The CFG post-order is topologically ordered w.r.t. dominance so a node comes after all
// its dominator tree children.
for &ebb in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(ebb) {
let idom = layout.pp_ebb(idom_inst);
let pre_max = cmp::max(self.nodes[ebb].pre_max, self.nodes[idom].pre_max);
for &block in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(block) {
let idom = layout.pp_block(idom_inst);
let pre_max = cmp::max(self.nodes[block].pre_max, self.nodes[idom].pre_max);
self.nodes[idom].pre_max = pre_max;
}
}
}
}
/// An iterator that enumerates the direct children of an EBB in the dominator tree.
/// An iterator that enumerates the direct children of a block in the dominator tree.
pub struct ChildIter<'a> {
dtpo: &'a DominatorTreePreorder,
next: PackedOption<Ebb>,
next: PackedOption<Block>,
}
impl<'a> Iterator for ChildIter<'a> {
type Item = Ebb;
type Item = Block;
fn next(&mut self) -> Option<Ebb> {
fn next(&mut self) -> Option<Block> {
let n = self.next.expand();
if let Some(ebb) = n {
self.next = self.dtpo.nodes[ebb].sibling;
if let Some(block) = n {
self.next = self.dtpo.nodes[block].sibling;
}
n
}
@@ -561,32 +563,32 @@ impl<'a> Iterator for ChildIter<'a> {
/// Query interface for the dominator tree pre-order.
impl DominatorTreePreorder {
/// Get an iterator over the direct children of `ebb` in the dominator tree.
/// Get an iterator over the direct children of `block` in the dominator tree.
///
/// These are the EBB's whose immediate dominator is an instruction in `ebb`, ordered according
/// These are the blocks whose immediate dominator is an instruction in `block`, ordered according
/// to the CFG reverse post-order.
pub fn children(&self, ebb: Ebb) -> ChildIter {
pub fn children(&self, block: Block) -> ChildIter {
ChildIter {
dtpo: self,
next: self.nodes[ebb].child,
next: self.nodes[block].child,
}
}
/// Fast, constant time dominance check with EBB granularity.
/// Fast, constant time dominance check with block granularity.
///
/// This computes the same result as `domtree.dominates(a, b)`, but in guaranteed fast constant
/// time. This is less general than the `DominatorTree` method because it only works with EBB
/// time. This is less general than the `DominatorTree` method because it only works with block
/// program points.
///
/// An EBB is considered to dominate itself.
pub fn dominates(&self, a: Ebb, b: Ebb) -> bool {
/// A block is considered to dominate itself.
pub fn dominates(&self, a: Block, b: Block) -> bool {
let na = &self.nodes[a];
let nb = &self.nodes[b];
na.pre_number <= nb.pre_number && na.pre_max >= nb.pre_max
}
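// Editorial sketch of the interval argument, using plain integers and
// illustrative names: a node's subtree occupies the pre-order range
// [pre_number, pre_max], and dominance is exactly containment of `b`'s range
// in `a`'s.
struct IntervalNode {
    pre_number: u32,
    pre_max: u32,
}
fn interval_dominates(a: &IntervalNode, b: &IntervalNode) -> bool {
    a.pre_number <= b.pre_number && a.pre_max >= b.pre_max
}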
/// Compare two EBBs according to the dominator pre-order.
pub fn pre_cmp_ebb(&self, a: Ebb, b: Ebb) -> Ordering {
/// Compare two blocks according to the dominator pre-order.
pub fn pre_cmp_block(&self, a: Block, b: Block) -> Ordering {
self.nodes[a].pre_number.cmp(&self.nodes[b].pre_number)
}
@@ -601,7 +603,7 @@ impl DominatorTreePreorder {
{
let a = a.into();
let b = b.into();
self.pre_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
self.pre_cmp_block(layout.pp_block(a), layout.pp_block(b))
.then(layout.cmp(a, b))
}
@@ -643,23 +645,23 @@ mod tests {
#[test]
fn unreachable_node() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let v0 = func.dfg.append_ebb_param(ebb0, I32);
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
cur.ins().brnz(v0, ebb2, &[]);
cur.insert_block(block0);
cur.ins().brnz(v0, block2, &[]);
cur.ins().trap(TrapCode::User(0));
cur.insert_ebb(ebb1);
cur.insert_block(block1);
let v1 = cur.ins().iconst(I32, 1);
let v2 = cur.ins().iadd(v0, v1);
cur.ins().jump(ebb0, &[v2]);
cur.ins().jump(block0, &[v2]);
cur.insert_ebb(ebb2);
cur.insert_block(block2);
cur.ins().return_(&[v0]);
let cfg = ControlFlowGraph::with_function(cur.func);
@@ -667,96 +669,99 @@ mod tests {
// Fall-through-first, prune-at-source DFT:
//
// ebb0 {
// brnz ebb2 {
// block0 {
// brnz block2 {
// trap
// ebb2 {
// block2 {
// return
// } ebb2
// } ebb0
assert_eq!(dt.cfg_postorder(), &[ebb2, ebb0]);
// } block2
// } block0
assert_eq!(dt.cfg_postorder(), &[block2, block0]);
let v2_def = cur.func.dfg.value_def(v2).unwrap_inst();
assert!(!dt.dominates(v2_def, ebb0, &cur.func.layout));
assert!(!dt.dominates(ebb0, v2_def, &cur.func.layout));
assert!(!dt.dominates(v2_def, block0, &cur.func.layout));
assert!(!dt.dominates(block0, v2_def, &cur.func.layout));
let mut dtpo = DominatorTreePreorder::new();
dtpo.compute(&dt, &cur.func.layout);
assert!(dtpo.dominates(ebb0, ebb0));
assert!(!dtpo.dominates(ebb0, ebb1));
assert!(dtpo.dominates(ebb0, ebb2));
assert!(!dtpo.dominates(ebb1, ebb0));
assert!(dtpo.dominates(ebb1, ebb1));
assert!(!dtpo.dominates(ebb1, ebb2));
assert!(!dtpo.dominates(ebb2, ebb0));
assert!(!dtpo.dominates(ebb2, ebb1));
assert!(dtpo.dominates(ebb2, ebb2));
assert!(dtpo.dominates(block0, block0));
assert!(!dtpo.dominates(block0, block1));
assert!(dtpo.dominates(block0, block2));
assert!(!dtpo.dominates(block1, block0));
assert!(dtpo.dominates(block1, block1));
assert!(!dtpo.dominates(block1, block2));
assert!(!dtpo.dominates(block2, block0));
assert!(!dtpo.dominates(block2, block1));
assert!(dtpo.dominates(block2, block2));
}
#[test]
fn non_zero_entry_block() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let ebb3 = func.dfg.make_ebb();
let cond = func.dfg.append_ebb_param(ebb3, I32);
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let block3 = func.dfg.make_block();
let cond = func.dfg.append_block_param(block3, I32);
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb3);
let jmp_ebb3_ebb1 = cur.ins().jump(ebb1, &[]);
cur.insert_block(block3);
let jmp_block3_block1 = cur.ins().jump(block1, &[]);
cur.insert_ebb(ebb1);
let br_ebb1_ebb0 = cur.ins().brnz(cond, ebb0, &[]);
let jmp_ebb1_ebb2 = cur.ins().jump(ebb2, &[]);
cur.insert_block(block1);
let br_block1_block0 = cur.ins().brnz(cond, block0, &[]);
let jmp_block1_block2 = cur.ins().jump(block2, &[]);
cur.insert_ebb(ebb2);
cur.ins().jump(ebb0, &[]);
cur.insert_block(block2);
cur.ins().jump(block0, &[]);
cur.insert_ebb(ebb0);
cur.insert_block(block0);
let cfg = ControlFlowGraph::with_function(cur.func);
let dt = DominatorTree::with_function(cur.func, &cfg);
// Fall-through-first, prune-at-source DFT:
//
// ebb3 {
// ebb3:jump ebb1 {
// ebb1 {
// ebb1:brnz ebb0 {
// ebb1:jump ebb2 {
// ebb2 {
// ebb2:jump ebb0 (seen)
// } ebb2
// } ebb1:jump ebb2
// ebb0 {
// } ebb0
// } ebb1:brnz ebb0
// } ebb1
// } ebb3:jump ebb1
// } ebb3
// block3 {
// block3:jump block1 {
// block1 {
// block1:brnz block0 {
// block1:jump block2 {
// block2 {
// block2:jump block0 (seen)
// } block2
// } block1:jump block2
// block0 {
// } block0
// } block1:brnz block0
// } block1
// } block3:jump block1
// } block3
assert_eq!(dt.cfg_postorder(), &[ebb2, ebb0, ebb1, ebb3]);
assert_eq!(dt.cfg_postorder(), &[block2, block0, block1, block3]);
assert_eq!(cur.func.layout.entry_block().unwrap(), ebb3);
assert_eq!(dt.idom(ebb3), None);
assert_eq!(dt.idom(ebb1).unwrap(), jmp_ebb3_ebb1);
assert_eq!(dt.idom(ebb2).unwrap(), jmp_ebb1_ebb2);
assert_eq!(dt.idom(ebb0).unwrap(), br_ebb1_ebb0);
assert_eq!(cur.func.layout.entry_block().unwrap(), block3);
assert_eq!(dt.idom(block3), None);
assert_eq!(dt.idom(block1).unwrap(), jmp_block3_block1);
assert_eq!(dt.idom(block2).unwrap(), jmp_block1_block2);
assert_eq!(dt.idom(block0).unwrap(), br_block1_block0);
assert!(dt.dominates(br_ebb1_ebb0, br_ebb1_ebb0, &cur.func.layout));
assert!(!dt.dominates(br_ebb1_ebb0, jmp_ebb3_ebb1, &cur.func.layout));
assert!(dt.dominates(jmp_ebb3_ebb1, br_ebb1_ebb0, &cur.func.layout));
assert!(dt.dominates(br_block1_block0, br_block1_block0, &cur.func.layout));
assert!(!dt.dominates(br_block1_block0, jmp_block3_block1, &cur.func.layout));
assert!(dt.dominates(jmp_block3_block1, br_block1_block0, &cur.func.layout));
assert_eq!(dt.rpo_cmp(ebb3, ebb3, &cur.func.layout), Ordering::Equal);
assert_eq!(dt.rpo_cmp(ebb3, ebb1, &cur.func.layout), Ordering::Less);
assert_eq!(
dt.rpo_cmp(ebb3, jmp_ebb3_ebb1, &cur.func.layout),
dt.rpo_cmp(block3, block3, &cur.func.layout),
Ordering::Equal
);
assert_eq!(dt.rpo_cmp(block3, block1, &cur.func.layout), Ordering::Less);
assert_eq!(
dt.rpo_cmp(block3, jmp_block3_block1, &cur.func.layout),
Ordering::Less
);
assert_eq!(
dt.rpo_cmp(jmp_ebb3_ebb1, jmp_ebb1_ebb2, &cur.func.layout),
dt.rpo_cmp(jmp_block3_block1, jmp_block1_block2, &cur.func.layout),
Ordering::Less
);
}
@@ -764,69 +769,69 @@ mod tests {
#[test]
fn backwards_layout() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
let jmp02 = cur.ins().jump(ebb2, &[]);
cur.insert_block(block0);
let jmp02 = cur.ins().jump(block2, &[]);
cur.insert_ebb(ebb1);
cur.insert_block(block1);
let trap = cur.ins().trap(TrapCode::User(5));
cur.insert_ebb(ebb2);
let jmp21 = cur.ins().jump(ebb1, &[]);
cur.insert_block(block2);
let jmp21 = cur.ins().jump(block1, &[]);
let cfg = ControlFlowGraph::with_function(cur.func);
let dt = DominatorTree::with_function(cur.func, &cfg);
assert_eq!(cur.func.layout.entry_block(), Some(ebb0));
assert_eq!(dt.idom(ebb0), None);
assert_eq!(dt.idom(ebb1), Some(jmp21));
assert_eq!(dt.idom(ebb2), Some(jmp02));
assert_eq!(cur.func.layout.entry_block(), Some(block0));
assert_eq!(dt.idom(block0), None);
assert_eq!(dt.idom(block1), Some(jmp21));
assert_eq!(dt.idom(block2), Some(jmp02));
assert!(dt.dominates(ebb0, ebb0, &cur.func.layout));
assert!(dt.dominates(ebb0, jmp02, &cur.func.layout));
assert!(dt.dominates(ebb0, ebb1, &cur.func.layout));
assert!(dt.dominates(ebb0, trap, &cur.func.layout));
assert!(dt.dominates(ebb0, ebb2, &cur.func.layout));
assert!(dt.dominates(ebb0, jmp21, &cur.func.layout));
assert!(dt.dominates(block0, block0, &cur.func.layout));
assert!(dt.dominates(block0, jmp02, &cur.func.layout));
assert!(dt.dominates(block0, block1, &cur.func.layout));
assert!(dt.dominates(block0, trap, &cur.func.layout));
assert!(dt.dominates(block0, block2, &cur.func.layout));
assert!(dt.dominates(block0, jmp21, &cur.func.layout));
assert!(!dt.dominates(jmp02, ebb0, &cur.func.layout));
assert!(!dt.dominates(jmp02, block0, &cur.func.layout));
assert!(dt.dominates(jmp02, jmp02, &cur.func.layout));
assert!(dt.dominates(jmp02, ebb1, &cur.func.layout));
assert!(dt.dominates(jmp02, block1, &cur.func.layout));
assert!(dt.dominates(jmp02, trap, &cur.func.layout));
assert!(dt.dominates(jmp02, ebb2, &cur.func.layout));
assert!(dt.dominates(jmp02, block2, &cur.func.layout));
assert!(dt.dominates(jmp02, jmp21, &cur.func.layout));
assert!(!dt.dominates(ebb1, ebb0, &cur.func.layout));
assert!(!dt.dominates(ebb1, jmp02, &cur.func.layout));
assert!(dt.dominates(ebb1, ebb1, &cur.func.layout));
assert!(dt.dominates(ebb1, trap, &cur.func.layout));
assert!(!dt.dominates(ebb1, ebb2, &cur.func.layout));
assert!(!dt.dominates(ebb1, jmp21, &cur.func.layout));
assert!(!dt.dominates(block1, block0, &cur.func.layout));
assert!(!dt.dominates(block1, jmp02, &cur.func.layout));
assert!(dt.dominates(block1, block1, &cur.func.layout));
assert!(dt.dominates(block1, trap, &cur.func.layout));
assert!(!dt.dominates(block1, block2, &cur.func.layout));
assert!(!dt.dominates(block1, jmp21, &cur.func.layout));
assert!(!dt.dominates(trap, ebb0, &cur.func.layout));
assert!(!dt.dominates(trap, block0, &cur.func.layout));
assert!(!dt.dominates(trap, jmp02, &cur.func.layout));
assert!(!dt.dominates(trap, ebb1, &cur.func.layout));
assert!(!dt.dominates(trap, block1, &cur.func.layout));
assert!(dt.dominates(trap, trap, &cur.func.layout));
assert!(!dt.dominates(trap, ebb2, &cur.func.layout));
assert!(!dt.dominates(trap, block2, &cur.func.layout));
assert!(!dt.dominates(trap, jmp21, &cur.func.layout));
assert!(!dt.dominates(ebb2, ebb0, &cur.func.layout));
assert!(!dt.dominates(ebb2, jmp02, &cur.func.layout));
assert!(dt.dominates(ebb2, ebb1, &cur.func.layout));
assert!(dt.dominates(ebb2, trap, &cur.func.layout));
assert!(dt.dominates(ebb2, ebb2, &cur.func.layout));
assert!(dt.dominates(ebb2, jmp21, &cur.func.layout));
assert!(!dt.dominates(block2, block0, &cur.func.layout));
assert!(!dt.dominates(block2, jmp02, &cur.func.layout));
assert!(dt.dominates(block2, block1, &cur.func.layout));
assert!(dt.dominates(block2, trap, &cur.func.layout));
assert!(dt.dominates(block2, block2, &cur.func.layout));
assert!(dt.dominates(block2, jmp21, &cur.func.layout));
assert!(!dt.dominates(jmp21, ebb0, &cur.func.layout));
assert!(!dt.dominates(jmp21, block0, &cur.func.layout));
assert!(!dt.dominates(jmp21, jmp02, &cur.func.layout));
assert!(dt.dominates(jmp21, ebb1, &cur.func.layout));
assert!(dt.dominates(jmp21, block1, &cur.func.layout));
assert!(dt.dominates(jmp21, trap, &cur.func.layout));
assert!(!dt.dominates(jmp21, ebb2, &cur.func.layout));
assert!(!dt.dominates(jmp21, block2, &cur.func.layout));
assert!(dt.dominates(jmp21, jmp21, &cur.func.layout));
}
}

View File

@@ -1,80 +1,80 @@
//! A control flow graph represented as mappings of extended basic blocks to their predecessors
//! A control flow graph represented as mappings of basic blocks to their predecessors
//! and successors.
//!
//! Successors are represented as extended basic blocks while predecessors are represented by basic
//! blocks. Basic blocks are denoted by tuples of EBB and branch/jump instructions. Each
//! Successors are represented as basic blocks while predecessors are represented by basic
//! blocks. Basic blocks are denoted by tuples of block and branch/jump instructions. Each
//! predecessor tuple corresponds to the end of a basic block.
//!
//! ```c
//! Ebb0:
//! Block0:
//! ... ; beginning of basic block
//!
//! ...
//!
//! brz vx, Ebb1 ; end of basic block
//! brz vx, Block1 ; end of basic block
//!
//! ... ; beginning of basic block
//!
//! ...
//!
//! jmp Ebb2 ; end of basic block
//! jmp Block2 ; end of basic block
//! ```
//!
//! Here `Ebb1` and `Ebb2` would each have a single predecessor denoted as `(Ebb0, brz)`
//! and `(Ebb0, jmp Ebb2)` respectively.
//! Here `Block1` and `Block2` would each have a single predecessor denoted as `(Block0, brz)`
//! and `(Block0, jmp Block2)` respectively.
use crate::bforest;
use crate::entity::SecondaryMap;
use crate::ir::instructions::BranchInfo;
use crate::ir::{Ebb, Function, Inst};
use crate::ir::{Block, Function, Inst};
use crate::timing;
use core::mem;
/// A basic block denoted by its enclosing Ebb and last instruction.
/// A basic block denoted by its enclosing Block and last instruction.
#[derive(Debug, PartialEq, Eq)]
pub struct BasicBlock {
/// Enclosing Ebb key.
pub ebb: Ebb,
pub struct BlockPredecessor {
/// Enclosing Block key.
pub block: Block,
/// Last instruction in the basic block.
pub inst: Inst,
}
impl BasicBlock {
/// Convenient method to construct new BasicBlock.
pub fn new(ebb: Ebb, inst: Inst) -> Self {
Self { ebb, inst }
impl BlockPredecessor {
/// Convenient method to construct new BlockPredecessor.
pub fn new(block: Block, inst: Inst) -> Self {
Self { block, inst }
}
}
/// A container for the successors and predecessors of some Ebb.
/// A container for the successors and predecessors of some Block.
#[derive(Clone, Default)]
struct CFGNode {
/// Instructions that can branch or jump to this EBB.
/// Instructions that can branch or jump to this block.
///
/// This maps branch instruction -> predecessor EBB which is redundant since the EBB containing
/// the branch instruction is available from the `layout.inst_ebb()` method. We store the
/// This maps branch instruction -> predecessor block which is redundant since the block containing
/// the branch instruction is available from the `layout.inst_block()` method. We store the
/// redundant information because:
///
/// 1. Many `pred_iter()` consumers want the EBB anyway, so it is handily available.
/// 2. The `invalidate_ebb_successors()` may be called *after* branches have been removed from
/// their EBB, but we still need to remove them form the old EBB predecessor map.
/// 1. Many `pred_iter()` consumers want the block anyway, so it is handily available.
/// 2. The `invalidate_block_successors()` may be called *after* branches have been removed from
/// their block, but we still need to remove them from the old block predecessor map.
///
/// The redundant EBB stored here is always consistent with the CFG successor lists, even after
/// The redundant block stored here is always consistent with the CFG successor lists, even after
/// the IR has been edited.
pub predecessors: bforest::Map<Inst, Ebb>,
pub predecessors: bforest::Map<Inst, Block>,
/// Set of EBBs that are the targets of branches and jumps in this EBB.
/// The set is ordered by EBB number, indicated by the `()` comparator type.
pub successors: bforest::Set<Ebb>,
/// Set of blocks that are the targets of branches and jumps in this block.
/// The set is ordered by block number, indicated by the `()` comparator type.
pub successors: bforest::Set<Block>,
}
/// The Control Flow Graph maintains a mapping of ebbs to their predecessors
/// The Control Flow Graph maintains a mapping of blocks to their predecessors
/// and successors where predecessors are basic blocks and successors are
/// extended basic blocks.
/// basic blocks.
pub struct ControlFlowGraph {
data: SecondaryMap<Ebb, CFGNode>,
pred_forest: bforest::MapForest<Inst, Ebb>,
succ_forest: bforest::SetForest<Ebb>,
data: SecondaryMap<Block, CFGNode>,
pred_forest: bforest::MapForest<Inst, Block>,
succ_forest: bforest::SetForest<Block>,
valid: bool,
}
@@ -110,27 +110,27 @@ impl ControlFlowGraph {
pub fn compute(&mut self, func: &Function) {
let _tt = timing::flowgraph();
self.clear();
self.data.resize(func.dfg.num_ebbs());
self.data.resize(func.dfg.num_blocks());
for ebb in &func.layout {
self.compute_ebb(func, ebb);
for block in &func.layout {
self.compute_block(func, block);
}
self.valid = true;
}
fn compute_ebb(&mut self, func: &Function, ebb: Ebb) {
for inst in func.layout.ebb_insts(ebb) {
fn compute_block(&mut self, func: &Function, block: Block) {
for inst in func.layout.block_insts(block) {
match func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(dest, _) => {
self.add_edge(ebb, inst, dest);
self.add_edge(block, inst, dest);
}
BranchInfo::Table(jt, dest) => {
if let Some(dest) = dest {
self.add_edge(ebb, inst, dest);
self.add_edge(block, inst, dest);
}
for dest in func.jump_tables[jt].iter() {
self.add_edge(ebb, inst, *dest);
self.add_edge(block, inst, *dest);
}
}
BranchInfo::NotABranch => {}
@@ -138,32 +138,32 @@ impl ControlFlowGraph {
}
}
fn invalidate_ebb_successors(&mut self, ebb: Ebb) {
fn invalidate_block_successors(&mut self, block: Block) {
// Temporarily take ownership because we need mutable access to self.data inside the loop.
// Unfortunately borrowck cannot see that our mut accesses to predecessors don't alias
// our iteration over successors.
let mut successors = mem::replace(&mut self.data[ebb].successors, Default::default());
let mut successors = mem::replace(&mut self.data[block].successors, Default::default());
for succ in successors.iter(&self.succ_forest) {
self.data[succ]
.predecessors
.retain(&mut self.pred_forest, |_, &mut e| e != ebb);
.retain(&mut self.pred_forest, |_, &mut e| e != block);
}
successors.clear(&mut self.succ_forest);
}
/// Recompute the control flow graph of `ebb`.
/// Recompute the control flow graph of `block`.
///
/// This is for use after modifying instructions within a specific EBB. It recomputes all edges
/// from `ebb` while leaving edges to `ebb` intact. Its functionality a subset of that of the
/// This is for use after modifying instructions within a specific block. It recomputes all edges
/// from `block` while leaving edges to `block` intact. Its functionality is a subset of that of the
/// more expensive `compute`, and should be used when we know we don't need to recompute the CFG
/// from scratch, but rather that our changes have been restricted to specific EBBs.
pub fn recompute_ebb(&mut self, func: &Function, ebb: Ebb) {
/// from scratch, but rather that our changes have been restricted to specific blocks.
pub fn recompute_block(&mut self, func: &Function, block: Block) {
debug_assert!(self.is_valid());
self.invalidate_ebb_successors(ebb);
self.compute_ebb(func, ebb);
self.invalidate_block_successors(block);
self.compute_block(func, block);
}
fn add_edge(&mut self, from: Ebb, from_inst: Inst, to: Ebb) {
fn add_edge(&mut self, from: Block, from_inst: Inst, to: Block) {
self.data[from]
.successors
.insert(to, &mut self.succ_forest, &());
@@ -172,15 +172,15 @@ impl ControlFlowGraph {
.insert(from_inst, from, &mut self.pred_forest, &());
}
/// Get an iterator over the CFG predecessors to `ebb`.
pub fn pred_iter(&self, ebb: Ebb) -> PredIter {
PredIter(self.data[ebb].predecessors.iter(&self.pred_forest))
/// Get an iterator over the CFG predecessors to `block`.
pub fn pred_iter(&self, block: Block) -> PredIter {
PredIter(self.data[block].predecessors.iter(&self.pred_forest))
}
/// Get an iterator over the CFG successors to `ebb`.
pub fn succ_iter(&self, ebb: Ebb) -> SuccIter {
/// Get an iterator over the CFG successors to `block`.
pub fn succ_iter(&self, block: Block) -> SuccIter {
debug_assert!(self.is_valid());
self.data[ebb].successors.iter(&self.succ_forest)
self.data[block].successors.iter(&self.succ_forest)
}
/// Check if the CFG is in a valid state.
@@ -193,21 +193,21 @@ impl ControlFlowGraph {
}
}
/// An iterator over EBB predecessors. The iterator type is `BasicBlock`.
/// An iterator over block predecessors. The iterator type is `BlockPredecessor`.
///
/// Each predecessor is an instruction that branches to the EBB.
pub struct PredIter<'a>(bforest::MapIter<'a, Inst, Ebb>);
/// Each predecessor is an instruction that branches to the block.
pub struct PredIter<'a>(bforest::MapIter<'a, Inst, Block>);
impl<'a> Iterator for PredIter<'a> {
type Item = BasicBlock;
type Item = BlockPredecessor;
fn next(&mut self) -> Option<BasicBlock> {
self.0.next().map(|(i, e)| BasicBlock::new(e, i))
fn next(&mut self) -> Option<BlockPredecessor> {
self.0.next().map(|(i, e)| BlockPredecessor::new(e, i))
}
}
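// Editorial sketch (hypothetical debug helper; `println!` assumes a std
// environment): each item pairs the predecessor block with the branch
// instruction that ends the predecessor basic block.
fn dump_preds(cfg: &ControlFlowGraph, block: Block) {
    for BlockPredecessor { block: pred, inst } in cfg.pred_iter(block) {
        println!("{} is reached from {} via {}", block, pred, inst);
    }
}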
/// An iterator over EBB successors. The iterator type is `Ebb`.
pub type SuccIter<'a> = bforest::SetIter<'a, Ebb>;
/// An iterator over block successors. The iterator type is `Block`.
pub type SuccIter<'a> = bforest::SetIter<'a, Block>;
#[cfg(test)]
mod tests {
@@ -225,126 +225,126 @@ mod tests {
#[test]
fn no_predecessors() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
func.layout.append_ebb(ebb0);
func.layout.append_ebb(ebb1);
func.layout.append_ebb(ebb2);
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
func.layout.append_block(block0);
func.layout.append_block(block1);
func.layout.append_block(block2);
let cfg = ControlFlowGraph::with_function(&func);
let mut fun_ebbs = func.layout.ebbs();
for ebb in func.layout.ebbs() {
assert_eq!(ebb, fun_ebbs.next().unwrap());
assert_eq!(cfg.pred_iter(ebb).count(), 0);
assert_eq!(cfg.succ_iter(ebb).count(), 0);
let mut fun_blocks = func.layout.blocks();
for block in func.layout.blocks() {
assert_eq!(block, fun_blocks.next().unwrap());
assert_eq!(cfg.pred_iter(block).count(), 0);
assert_eq!(cfg.succ_iter(block).count(), 0);
}
}
#[test]
fn branches_and_jumps() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let cond = func.dfg.append_ebb_param(ebb0, types::I32);
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let cond = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let br_ebb0_ebb2;
let br_ebb1_ebb1;
let jmp_ebb0_ebb1;
let jmp_ebb1_ebb2;
let br_block0_block2;
let br_block1_block1;
let jmp_block0_block1;
let jmp_block1_block2;
{
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
br_ebb0_ebb2 = cur.ins().brnz(cond, ebb2, &[]);
jmp_ebb0_ebb1 = cur.ins().jump(ebb1, &[]);
cur.insert_block(block0);
br_block0_block2 = cur.ins().brnz(cond, block2, &[]);
jmp_block0_block1 = cur.ins().jump(block1, &[]);
cur.insert_ebb(ebb1);
br_ebb1_ebb1 = cur.ins().brnz(cond, ebb1, &[]);
jmp_ebb1_ebb2 = cur.ins().jump(ebb2, &[]);
cur.insert_block(block1);
br_block1_block1 = cur.ins().brnz(cond, block1, &[]);
jmp_block1_block2 = cur.ins().jump(block2, &[]);
cur.insert_ebb(ebb2);
cur.insert_block(block2);
}
let mut cfg = ControlFlowGraph::with_function(&func);
{
let ebb0_predecessors = cfg.pred_iter(ebb0).collect::<Vec<_>>();
let ebb1_predecessors = cfg.pred_iter(ebb1).collect::<Vec<_>>();
let ebb2_predecessors = cfg.pred_iter(ebb2).collect::<Vec<_>>();
let block0_predecessors = cfg.pred_iter(block0).collect::<Vec<_>>();
let block1_predecessors = cfg.pred_iter(block1).collect::<Vec<_>>();
let block2_predecessors = cfg.pred_iter(block2).collect::<Vec<_>>();
let ebb0_successors = cfg.succ_iter(ebb0).collect::<Vec<_>>();
let ebb1_successors = cfg.succ_iter(ebb1).collect::<Vec<_>>();
let ebb2_successors = cfg.succ_iter(ebb2).collect::<Vec<_>>();
let block0_successors = cfg.succ_iter(block0).collect::<Vec<_>>();
let block1_successors = cfg.succ_iter(block1).collect::<Vec<_>>();
let block2_successors = cfg.succ_iter(block2).collect::<Vec<_>>();
assert_eq!(ebb0_predecessors.len(), 0);
assert_eq!(ebb1_predecessors.len(), 2);
assert_eq!(ebb2_predecessors.len(), 2);
assert_eq!(block0_predecessors.len(), 0);
assert_eq!(block1_predecessors.len(), 2);
assert_eq!(block2_predecessors.len(), 2);
assert_eq!(
ebb1_predecessors.contains(&BasicBlock::new(ebb0, jmp_ebb0_ebb1)),
block1_predecessors.contains(&BlockPredecessor::new(block0, jmp_block0_block1)),
true
);
assert_eq!(
ebb1_predecessors.contains(&BasicBlock::new(ebb1, br_ebb1_ebb1)),
block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)),
true
);
assert_eq!(
ebb2_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb2)),
block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)),
true
);
assert_eq!(
ebb2_predecessors.contains(&BasicBlock::new(ebb1, jmp_ebb1_ebb2)),
block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)),
true
);
assert_eq!(ebb0_successors, [ebb1, ebb2]);
assert_eq!(ebb1_successors, [ebb1, ebb2]);
assert_eq!(ebb2_successors, []);
assert_eq!(block0_successors, [block1, block2]);
assert_eq!(block1_successors, [block1, block2]);
assert_eq!(block2_successors, []);
}
// Change some instructions and recompute ebb0
func.dfg.replace(br_ebb0_ebb2).brnz(cond, ebb1, &[]);
func.dfg.replace(jmp_ebb0_ebb1).return_(&[]);
cfg.recompute_ebb(&mut func, ebb0);
let br_ebb0_ebb1 = br_ebb0_ebb2;
// Change some instructions and recompute block0
func.dfg.replace(br_block0_block2).brnz(cond, block1, &[]);
func.dfg.replace(jmp_block0_block1).return_(&[]);
cfg.recompute_block(&mut func, block0);
let br_block0_block1 = br_block0_block2;
{
let ebb0_predecessors = cfg.pred_iter(ebb0).collect::<Vec<_>>();
let ebb1_predecessors = cfg.pred_iter(ebb1).collect::<Vec<_>>();
let ebb2_predecessors = cfg.pred_iter(ebb2).collect::<Vec<_>>();
let block0_predecessors = cfg.pred_iter(block0).collect::<Vec<_>>();
let block1_predecessors = cfg.pred_iter(block1).collect::<Vec<_>>();
let block2_predecessors = cfg.pred_iter(block2).collect::<Vec<_>>();
let ebb0_successors = cfg.succ_iter(ebb0);
let ebb1_successors = cfg.succ_iter(ebb1);
let ebb2_successors = cfg.succ_iter(ebb2);
let block0_successors = cfg.succ_iter(block0);
let block1_successors = cfg.succ_iter(block1);
let block2_successors = cfg.succ_iter(block2);
assert_eq!(ebb0_predecessors.len(), 0);
assert_eq!(ebb1_predecessors.len(), 2);
assert_eq!(ebb2_predecessors.len(), 1);
assert_eq!(block0_predecessors.len(), 0);
assert_eq!(block1_predecessors.len(), 2);
assert_eq!(block2_predecessors.len(), 1);
assert_eq!(
ebb1_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb1)),
block1_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block1)),
true
);
assert_eq!(
ebb1_predecessors.contains(&BasicBlock::new(ebb1, br_ebb1_ebb1)),
block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)),
true
);
assert_eq!(
ebb2_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb2)),
block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)),
false
);
assert_eq!(
ebb2_predecessors.contains(&BasicBlock::new(ebb1, jmp_ebb1_ebb2)),
block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)),
true
);
assert_eq!(ebb0_successors.collect::<Vec<_>>(), [ebb1]);
assert_eq!(ebb1_successors.collect::<Vec<_>>(), [ebb1, ebb2]);
assert_eq!(ebb2_successors.collect::<Vec<_>>(), []);
assert_eq!(block0_successors.collect::<Vec<_>>(), [block1]);
assert_eq!(block1_successors.collect::<Vec<_>>(), [block1, block2]);
assert_eq!(block2_successors.collect::<Vec<_>>(), []);
}
}
}

View File

@@ -223,10 +223,10 @@ mod tests {
#[test]
fn types() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let arg0 = func.dfg.append_ebb_param(ebb0, I32);
let block0 = func.dfg.make_block();
let arg0 = func.dfg.append_block_param(block0, I32);
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
// Explicit types.
let v0 = pos.ins().iconst(I32, 3);
@@ -244,10 +244,10 @@ mod tests {
#[test]
fn reuse_results() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let arg0 = func.dfg.append_ebb_param(ebb0, I32);
let block0 = func.dfg.make_block();
let arg0 = func.dfg.append_block_param(block0, I32);
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
let v0 = pos.ins().iadd_imm(arg0, 17);
assert_eq!(pos.func.dfg.value_type(v0), I32);

View File

@@ -1,4 +1,4 @@
//! Data flow graph tracking Instructions, Values, and EBBs.
//! Data flow graph tracking Instructions, Values, and blocks.
use crate::entity::{self, PrimaryMap, SecondaryMap};
use crate::ir;
@@ -7,7 +7,7 @@ use crate::ir::extfunc::ExtFuncData;
use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
use crate::ir::{types, ConstantData, ConstantPool, Immediate};
use crate::ir::{
Ebb, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
ValueListPool,
};
use crate::isa::TargetIsa;
@@ -21,18 +21,18 @@ use core::mem;
use core::ops::{Index, IndexMut};
use core::u16;
/// A data flow graph defines all instructions and extended basic blocks in a function as well as
/// A data flow graph defines all instructions and basic blocks in a function as well as
/// the data flow dependencies between them. The DFG also tracks values which can be either
/// instruction results or EBB parameters.
/// instruction results or block parameters.
///
/// The layout of EBBs in the function and of instructions in each EBB is recorded by the
/// The layout of blocks in the function and of instructions in each block is recorded by the
/// `Layout` data structure which forms the other half of the function representation.
///
#[derive(Clone)]
pub struct DataFlowGraph {
/// Data about all of the instructions in the function, including opcodes and operands.
/// The instructions in this map are not in program order. That is tracked by `Layout`, along
/// with the EBB containing each instruction.
/// with the block containing each instruction.
insts: PrimaryMap<Inst, InstructionData>,
/// List of result values for each instruction.
@@ -41,11 +41,11 @@ pub struct DataFlowGraph {
/// primary `insts` map.
results: SecondaryMap<Inst, ValueList>,
/// Extended basic blocks in the function and their parameters.
/// Basic blocks in the function and their parameters.
///
/// This map is not in program order. That is handled by `Layout`, and so is the sequence of
/// instructions contained in each EBB.
ebbs: PrimaryMap<Ebb, EbbData>,
/// instructions contained in each block.
blocks: PrimaryMap<Block, BlockData>,
/// Memory pool of value lists.
///
@@ -53,7 +53,7 @@ pub struct DataFlowGraph {
///
/// - Instructions in `insts` that don't have room for their entire argument list inline.
/// - Instruction result values in `results`.
/// - EBB parameters in `ebbs`.
/// - Block parameters in `blocks`.
pub value_lists: ValueListPool,
/// Primary value table with entries for all values.
@@ -85,7 +85,7 @@ impl DataFlowGraph {
Self {
insts: PrimaryMap::new(),
results: SecondaryMap::new(),
ebbs: PrimaryMap::new(),
blocks: PrimaryMap::new(),
value_lists: ValueListPool::new(),
values: PrimaryMap::new(),
signatures: PrimaryMap::new(),
@@ -101,7 +101,7 @@ impl DataFlowGraph {
pub fn clear(&mut self) {
self.insts.clear();
self.results.clear();
self.ebbs.clear();
self.blocks.clear();
self.value_lists.clear();
self.values.clear();
self.signatures.clear();
@@ -125,17 +125,17 @@ impl DataFlowGraph {
self.insts.is_valid(inst)
}
/// Get the total number of extended basic blocks created in this function, whether they are
/// Get the total number of basic blocks created in this function, whether they are
/// currently inserted in the layout or not.
///
/// This is intended for use with `SecondaryMap::with_capacity`.
pub fn num_ebbs(&self) -> usize {
self.ebbs.len()
pub fn num_blocks(&self) -> usize {
self.blocks.len()
}
/// Returns `true` if the given ebb reference is valid.
pub fn ebb_is_valid(&self, ebb: Ebb) -> bool {
self.ebbs.is_valid(ebb)
/// Returns `true` if the given block reference is valid.
pub fn block_is_valid(&self, block: Block) -> bool {
self.blocks.is_valid(block)
}
/// Get the total number of values.
@@ -213,7 +213,7 @@ impl<'a> Iterator for Values<'a> {
/// Handling values.
///
/// Values are either EBB parameters or instruction results.
/// Values are either block parameters or instruction results.
impl DataFlowGraph {
/// Allocate an extended value entry.
fn make_value(&mut self, data: ValueData) -> Value {
@@ -243,12 +243,12 @@ impl DataFlowGraph {
/// Get the definition of a value.
///
/// This is either the instruction that defined it or the Ebb that has the value as an
/// This is either the instruction that defined it or the Block that has the value as a
/// parameter.
pub fn value_def(&self, v: Value) -> ValueDef {
match self.values[v] {
ValueData::Inst { inst, num, .. } => ValueDef::Result(inst, num as usize),
ValueData::Param { ebb, num, .. } => ValueDef::Param(ebb, num as usize),
ValueData::Param { block, num, .. } => ValueDef::Param(block, num as usize),
ValueData::Alias { original, .. } => {
// Make sure we only recurse one level. `resolve_aliases` has safeguards to
// detect alias loops without overrunning the stack.
@@ -257,7 +257,7 @@ impl DataFlowGraph {
}
}
/// Determine if `v` is an attached instruction result / EBB parameter.
/// Determine if `v` is an attached instruction result / block parameter.
///
/// An attached value can't be attached to something else without first being detached.
///
@@ -267,7 +267,7 @@ impl DataFlowGraph {
use self::ValueData::*;
match self.values[v] {
Inst { inst, num, .. } => Some(&v) == self.inst_results(inst).get(num as usize),
Param { ebb, num, .. } => Some(&v) == self.ebb_params(ebb).get(num as usize),
Param { block, num, .. } => Some(&v) == self.block_params(block).get(num as usize),
Alias { .. } => false,
}
}
@@ -297,7 +297,7 @@ impl DataFlowGraph {
/// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest`
/// will behave as if they used that value `src`.
///
/// The `dest` value can't be attached to an instruction or EBB.
/// The `dest` value can't be attached to an instruction or block.
pub fn change_to_alias(&mut self, dest: Value, src: Value) {
debug_assert!(!self.value_is_attached(dest));
// Try to create short alias chains by finding the original source value.
@@ -376,8 +376,8 @@ impl DataFlowGraph {
pub enum ValueDef {
/// Value is the n'th result of an instruction.
Result(Inst, usize),
/// Value is the n'th parameter to an EBB.
Param(Ebb, usize),
/// Value is the n'th parameter to a block.
Param(Block, usize),
}
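// Editorial sketch (assumed helper; `format!` assumes an alloc environment):
// the two `ValueDef` cases in use.
fn describe_def(dfg: &DataFlowGraph, v: Value) -> String {
    match dfg.value_def(v) {
        ValueDef::Result(inst, n) => format!("result {} of {}", n, inst),
        ValueDef::Param(block, n) => format!("parameter {} of {}", n, block),
    }
}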
impl ValueDef {
@@ -389,11 +389,11 @@ impl ValueDef {
}
}
/// Unwrap the EBB there the parameter is defined, or panic.
pub fn unwrap_ebb(&self) -> Ebb {
/// Unwrap the block where the parameter is defined, or panic.
pub fn unwrap_block(&self) -> Block {
match *self {
Self::Param(ebb, _) => ebb,
_ => panic!("Value is not an EBB parameter"),
Self::Param(block, _) => block,
_ => panic!("Value is not a block parameter"),
}
}
@@ -419,12 +419,12 @@ enum ValueData {
/// Value is defined by an instruction.
Inst { ty: Type, num: u16, inst: Inst },
/// Value is an EBB parameter.
Param { ty: Type, num: u16, ebb: Ebb },
/// Value is a block parameter.
Param { ty: Type, num: u16, block: Block },
/// Value is an alias of another value.
/// An alias value can't be linked as an instruction result or EBB parameter. It is used as a
/// placeholder when the original instruction or EBB has been rewritten or modified.
/// An alias value can't be linked as an instruction result or block parameter. It is used as a
/// placeholder when the original instruction or block has been rewritten or modified.
Alias { ty: Type, original: Value },
}
@@ -760,61 +760,64 @@ impl IndexMut<Inst> for DataFlowGraph {
}
}
/// Extended basic blocks.
/// Basic blocks.
impl DataFlowGraph {
/// Create a new basic block.
pub fn make_ebb(&mut self) -> Ebb {
self.ebbs.push(EbbData::new())
pub fn make_block(&mut self) -> Block {
self.blocks.push(BlockData::new())
}
/// Get the number of parameters on `ebb`.
pub fn num_ebb_params(&self, ebb: Ebb) -> usize {
self.ebbs[ebb].params.len(&self.value_lists)
/// Get the number of parameters on `block`.
pub fn num_block_params(&self, block: Block) -> usize {
self.blocks[block].params.len(&self.value_lists)
}
/// Get the parameters on `ebb`.
pub fn ebb_params(&self, ebb: Ebb) -> &[Value] {
self.ebbs[ebb].params.as_slice(&self.value_lists)
/// Get the parameters on `block`.
pub fn block_params(&self, block: Block) -> &[Value] {
self.blocks[block].params.as_slice(&self.value_lists)
}
/// Get the types of the parameters on `ebb`.
pub fn ebb_param_types(&self, ebb: Ebb) -> Vec<Type> {
self.ebb_params(ebb)
/// Get the types of the parameters on `block`.
pub fn block_param_types(&self, block: Block) -> Vec<Type> {
self.block_params(block)
.iter()
.map(|&v| self.value_type(v))
.collect()
}
/// Append a parameter with type `ty` to `ebb`.
pub fn append_ebb_param(&mut self, ebb: Ebb, ty: Type) -> Value {
/// Append a parameter with type `ty` to `block`.
pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value {
let param = self.values.next_key();
let num = self.ebbs[ebb].params.push(param, &mut self.value_lists);
debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
let num = self.blocks[block].params.push(param, &mut self.value_lists);
debug_assert!(num <= u16::MAX as usize, "Too many parameters on block");
self.make_value(ValueData::Param {
ty,
num: num as u16,
ebb,
block,
})
}
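// Editorial sketch (hypothetical snippet mirroring the unit tests in this
// change): creating a block and giving it two parameters with the calls above.
fn two_param_block(dfg: &mut DataFlowGraph) -> Block {
    let block = dfg.make_block();
    let x = dfg.append_block_param(block, types::I32);
    let y = dfg.append_block_param(block, types::I64);
    debug_assert_eq!(dfg.block_params(block), &[x, y]);
    block
}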
/// Removes `val` from `ebb`'s parameters by swapping it with the last parameter on `ebb`.
/// Removes `val` from `block`'s parameters by swapping it with the last parameter on `block`.
/// Returns the position of `val` before removal.
///
/// *Important*: to ensure O(1) deletion, this method swaps the removed parameter with the
/// last `ebb` parameter. This can disrupt all the branch instructions jumping to this
/// `ebb` for which you have to change the branch argument order if necessary.
/// last `block` parameter. This can disrupt all the branch instructions jumping to this
/// `block` for which you have to change the branch argument order if necessary.
///
/// Panics if `val` is not an EBB parameter.
pub fn swap_remove_ebb_param(&mut self, val: Value) -> usize {
let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[val] {
(ebb, num)
/// Panics if `val` is not a block parameter.
pub fn swap_remove_block_param(&mut self, val: Value) -> usize {
let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] {
(block, num)
} else {
panic!("{} must be an EBB parameter", val);
panic!("{} must be a block parameter", val);
};
self.ebbs[ebb]
self.blocks[block]
.params
.swap_remove(num as usize, &mut self.value_lists);
if let Some(last_arg_val) = self.ebbs[ebb].params.get(num as usize, &self.value_lists) {
if let Some(last_arg_val) = self.blocks[block]
.params
.get(num as usize, &self.value_lists)
{
// We update the position of the old last arg.
if let ValueData::Param {
num: ref mut old_num,
@@ -823,25 +826,25 @@ impl DataFlowGraph {
{
*old_num = num;
} else {
panic!("{} should be an Ebb parameter", last_arg_val);
panic!("{} should be an Block parameter", last_arg_val);
}
}
num as usize
}
/// Removes `val` from `ebb`'s parameters by a standard linear time list removal which
/// Removes `val` from `block`'s parameters by a standard linear time list removal which
/// preserves ordering. Also updates the values' data.
pub fn remove_ebb_param(&mut self, val: Value) {
let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[val] {
(ebb, num)
pub fn remove_block_param(&mut self, val: Value) {
let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] {
(block, num)
} else {
panic!("{} must be an EBB parameter", val);
panic!("{} must be an block parameter", val);
};
self.ebbs[ebb]
self.blocks[block]
.params
.remove(num as usize, &mut self.value_lists);
for index in num..(self.num_ebb_params(ebb) as u16) {
match self.values[self.ebbs[ebb]
for index in num..(self.num_block_params(block) as u16) {
match self.values[self.blocks[block]
.params
.get(index as usize, &self.value_lists)
.unwrap()]
@@ -850,8 +853,8 @@ impl DataFlowGraph {
*num -= 1;
}
_ => panic!(
"{} must be an EBB parameter",
self.ebbs[ebb]
"{} must be an block parameter",
self.blocks[block]
.params
.get(index as usize, &self.value_lists)
.unwrap()
@@ -860,71 +863,73 @@ impl DataFlowGraph {
}
}
/// Append an existing value to `ebb`'s parameters.
/// Append an existing value to `block`'s parameters.
///
/// The appended value can't already be attached to something else.
///
/// In almost all cases, you should be using `append_ebb_param()` instead of this method.
pub fn attach_ebb_param(&mut self, ebb: Ebb, param: Value) {
/// In almost all cases, you should be using `append_block_param()` instead of this method.
pub fn attach_block_param(&mut self, block: Block, param: Value) {
debug_assert!(!self.value_is_attached(param));
let num = self.ebbs[ebb].params.push(param, &mut self.value_lists);
debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
let num = self.blocks[block].params.push(param, &mut self.value_lists);
debug_assert!(num <= u16::MAX as usize, "Too many parameters on block");
let ty = self.value_type(param);
self.values[param] = ValueData::Param {
ty,
num: num as u16,
ebb,
block,
};
}
/// Replace an EBB parameter with a new value of type `ty`.
/// Replace a block parameter with a new value of type `ty`.
///
/// The `old_value` must be an attached EBB parameter. It is removed from its place in the list
/// The `old_value` must be an attached block parameter. It is removed from its place in the list
/// of parameters and replaced by a new value of type `new_type`. The new value gets the same
/// position in the list, and other parameters are not disturbed.
///
/// The old value is left detached, so it should probably be changed into something else.
///
/// Returns the new value.
pub fn replace_ebb_param(&mut self, old_value: Value, new_type: Type) -> Value {
pub fn replace_block_param(&mut self, old_value: Value, new_type: Type) -> Value {
// Create new value identical to the old one except for the type.
let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[old_value] {
(ebb, num)
let (block, num) = if let ValueData::Param { num, block, .. } = self.values[old_value] {
(block, num)
} else {
panic!("{} must be an EBB parameter", old_value);
panic!("{} must be an block parameter", old_value);
};
let new_arg = self.make_value(ValueData::Param {
ty: new_type,
num,
ebb,
block,
});
self.ebbs[ebb].params.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg;
self.blocks[block]
.params
.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg;
new_arg
}
/// Detach all the parameters from `ebb` and return them as a `ValueList`.
/// Detach all the parameters from `block` and return them as a `ValueList`.
///
/// This is a quite low-level operation. Sensible things to do with the detached EBB parameters
/// is to put them back on the same EBB with `attach_ebb_param()` or change them into aliases
/// This is quite a low-level operation. Sensible things to do with the detached block parameters
/// are to put them back on the same block with `attach_block_param()` or change them into aliases
/// with `change_to_alias()`.
pub fn detach_ebb_params(&mut self, ebb: Ebb) -> ValueList {
self.ebbs[ebb].params.take()
pub fn detach_block_params(&mut self, block: Block) -> ValueList {
self.blocks[block].params.take()
}
}
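// Minimal usage sketch of the renamed block-parameter API (hypothetical
// test code; `types` refers to crate::ir::types):
let mut dfg = DataFlowGraph::new();
let block = dfg.make_block();
let x = dfg.append_block_param(block, types::I32);
let y = dfg.append_block_param(block, types::F64);
assert_eq!(dfg.block_param_types(block), vec![types::I32, types::F64]);
// Swap-removing `x` moves the last parameter into its slot, so branch
// arguments targeting `block` would have to be reordered to match.
assert_eq!(dfg.swap_remove_block_param(x), 0);
assert_eq!(dfg.block_params(block), &[y]);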
/// Contents of an extended basic block.
/// Contents of a basic block.
///
/// Parameters on an extended basic block are values that dominate everything in the EBB. All
/// branches to this EBB must provide matching arguments, and the arguments to the entry EBB must
/// Parameters on a basic block are values that dominate everything in the block. All
/// branches to this block must provide matching arguments, and the arguments to the entry block must
/// match the function arguments.
#[derive(Clone)]
struct EbbData {
/// List of parameters to this EBB.
struct BlockData {
/// List of parameters to this block.
params: ValueList,
}
impl EbbData {
impl BlockData {
fn new() -> Self {
Self {
params: ValueList::new(),
@@ -1012,17 +1017,17 @@ impl DataFlowGraph {
self.make_inst_results_reusing(inst, ctrl_typevar, reuse.iter().map(|x| Some(*x)))
}
/// Similar to `append_ebb_param`, append a parameter with type `ty` to
/// `ebb`, but using value `val`. This is only for use by the parser to
/// Similar to `append_block_param`, append a parameter with type `ty` to
/// `block`, but using value `val`. This is only for use by the parser to
/// create parameters with specific values.
#[cold]
pub fn append_ebb_param_for_parser(&mut self, ebb: Ebb, ty: Type, val: Value) {
let num = self.ebbs[ebb].params.push(val, &mut self.value_lists);
assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
pub fn append_block_param_for_parser(&mut self, block: Block, ty: Type, val: Value) {
let num = self.blocks[block].params.push(val, &mut self.value_lists);
assert!(num <= u16::MAX as usize, "Too many parameters on block");
self.values[val] = ValueData::Param {
ty,
num: num as u16,
ebb,
block,
};
}
@@ -1165,95 +1170,95 @@ mod tests {
}
#[test]
fn ebb() {
fn block() {
let mut dfg = DataFlowGraph::new();
let ebb = dfg.make_ebb();
assert_eq!(ebb.to_string(), "ebb0");
assert_eq!(dfg.num_ebb_params(ebb), 0);
assert_eq!(dfg.ebb_params(ebb), &[]);
assert!(dfg.detach_ebb_params(ebb).is_empty());
assert_eq!(dfg.num_ebb_params(ebb), 0);
assert_eq!(dfg.ebb_params(ebb), &[]);
let block = dfg.make_block();
assert_eq!(block.to_string(), "block0");
assert_eq!(dfg.num_block_params(block), 0);
assert_eq!(dfg.block_params(block), &[]);
assert!(dfg.detach_block_params(block).is_empty());
assert_eq!(dfg.num_block_params(block), 0);
assert_eq!(dfg.block_params(block), &[]);
let arg1 = dfg.append_ebb_param(ebb, types::F32);
let arg1 = dfg.append_block_param(block, types::F32);
assert_eq!(arg1.to_string(), "v0");
assert_eq!(dfg.num_ebb_params(ebb), 1);
assert_eq!(dfg.ebb_params(ebb), &[arg1]);
assert_eq!(dfg.num_block_params(block), 1);
assert_eq!(dfg.block_params(block), &[arg1]);
let arg2 = dfg.append_ebb_param(ebb, types::I16);
let arg2 = dfg.append_block_param(block, types::I16);
assert_eq!(arg2.to_string(), "v1");
assert_eq!(dfg.num_ebb_params(ebb), 2);
assert_eq!(dfg.ebb_params(ebb), &[arg1, arg2]);
assert_eq!(dfg.num_block_params(block), 2);
assert_eq!(dfg.block_params(block), &[arg1, arg2]);
assert_eq!(dfg.value_def(arg1), ValueDef::Param(ebb, 0));
assert_eq!(dfg.value_def(arg2), ValueDef::Param(ebb, 1));
assert_eq!(dfg.value_def(arg1), ValueDef::Param(block, 0));
assert_eq!(dfg.value_def(arg2), ValueDef::Param(block, 1));
assert_eq!(dfg.value_type(arg1), types::F32);
assert_eq!(dfg.value_type(arg2), types::I16);
// Swap the two EBB parameters.
let vlist = dfg.detach_ebb_params(ebb);
assert_eq!(dfg.num_ebb_params(ebb), 0);
assert_eq!(dfg.ebb_params(ebb), &[]);
// Swap the two block parameters.
let vlist = dfg.detach_block_params(block);
assert_eq!(dfg.num_block_params(block), 0);
assert_eq!(dfg.block_params(block), &[]);
assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]);
dfg.attach_ebb_param(ebb, arg2);
let arg3 = dfg.append_ebb_param(ebb, types::I32);
dfg.attach_ebb_param(ebb, arg1);
assert_eq!(dfg.ebb_params(ebb), &[arg2, arg3, arg1]);
dfg.attach_block_param(block, arg2);
let arg3 = dfg.append_block_param(block, types::I32);
dfg.attach_block_param(block, arg1);
assert_eq!(dfg.block_params(block), &[arg2, arg3, arg1]);
}
#[test]
fn replace_ebb_params() {
fn replace_block_params() {
let mut dfg = DataFlowGraph::new();
let ebb = dfg.make_ebb();
let arg1 = dfg.append_ebb_param(ebb, types::F32);
let block = dfg.make_block();
let arg1 = dfg.append_block_param(block, types::F32);
let new1 = dfg.replace_ebb_param(arg1, types::I64);
let new1 = dfg.replace_block_param(arg1, types::I64);
assert_eq!(dfg.value_type(arg1), types::F32);
assert_eq!(dfg.value_type(new1), types::I64);
assert_eq!(dfg.ebb_params(ebb), &[new1]);
assert_eq!(dfg.block_params(block), &[new1]);
dfg.attach_ebb_param(ebb, arg1);
assert_eq!(dfg.ebb_params(ebb), &[new1, arg1]);
dfg.attach_block_param(block, arg1);
assert_eq!(dfg.block_params(block), &[new1, arg1]);
let new2 = dfg.replace_ebb_param(arg1, types::I8);
let new2 = dfg.replace_block_param(arg1, types::I8);
assert_eq!(dfg.value_type(arg1), types::F32);
assert_eq!(dfg.value_type(new2), types::I8);
assert_eq!(dfg.ebb_params(ebb), &[new1, new2]);
assert_eq!(dfg.block_params(block), &[new1, new2]);
dfg.attach_ebb_param(ebb, arg1);
assert_eq!(dfg.ebb_params(ebb), &[new1, new2, arg1]);
dfg.attach_block_param(block, arg1);
assert_eq!(dfg.block_params(block), &[new1, new2, arg1]);
let new3 = dfg.replace_ebb_param(new2, types::I16);
let new3 = dfg.replace_block_param(new2, types::I16);
assert_eq!(dfg.value_type(new1), types::I64);
assert_eq!(dfg.value_type(new2), types::I8);
assert_eq!(dfg.value_type(new3), types::I16);
assert_eq!(dfg.ebb_params(ebb), &[new1, new3, arg1]);
assert_eq!(dfg.block_params(block), &[new1, new3, arg1]);
}
#[test]
fn swap_remove_ebb_params() {
fn swap_remove_block_params() {
let mut dfg = DataFlowGraph::new();
let ebb = dfg.make_ebb();
let arg1 = dfg.append_ebb_param(ebb, types::F32);
let arg2 = dfg.append_ebb_param(ebb, types::F32);
let arg3 = dfg.append_ebb_param(ebb, types::F32);
assert_eq!(dfg.ebb_params(ebb), &[arg1, arg2, arg3]);
let block = dfg.make_block();
let arg1 = dfg.append_block_param(block, types::F32);
let arg2 = dfg.append_block_param(block, types::F32);
let arg3 = dfg.append_block_param(block, types::F32);
assert_eq!(dfg.block_params(block), &[arg1, arg2, arg3]);
dfg.swap_remove_ebb_param(arg1);
dfg.swap_remove_block_param(arg1);
assert_eq!(dfg.value_is_attached(arg1), false);
assert_eq!(dfg.value_is_attached(arg2), true);
assert_eq!(dfg.value_is_attached(arg3), true);
assert_eq!(dfg.ebb_params(ebb), &[arg3, arg2]);
dfg.swap_remove_ebb_param(arg2);
assert_eq!(dfg.block_params(block), &[arg3, arg2]);
dfg.swap_remove_block_param(arg2);
assert_eq!(dfg.value_is_attached(arg2), false);
assert_eq!(dfg.value_is_attached(arg3), true);
assert_eq!(dfg.ebb_params(ebb), &[arg3]);
dfg.swap_remove_ebb_param(arg3);
assert_eq!(dfg.block_params(block), &[arg3]);
dfg.swap_remove_block_param(arg3);
assert_eq!(dfg.value_is_attached(arg3), false);
assert_eq!(dfg.ebb_params(ebb), &[]);
assert_eq!(dfg.block_params(block), &[]);
}
#[test]
@@ -1261,9 +1266,9 @@ mod tests {
use crate::ir::InstBuilder;
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
// Build a little test program.
let v1 = pos.ins().iconst(types::I32, 42);
@@ -1271,7 +1276,7 @@ mod tests {
// Make sure we can resolve value aliases even when values is empty.
assert_eq!(pos.func.dfg.resolve_aliases(v1), v1);
let arg0 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
let arg0 = pos.func.dfg.append_block_param(block0, types::I32);
let (s, c) = pos.ins().iadd_ifcout(v1, arg0);
let iadd = match pos.func.dfg.value_def(s) {
ValueDef::Result(i, 0) => i,


@@ -1,7 +1,7 @@
//! Cranelift IR entity references.
//!
//! Instructions in Cranelift IR need to reference other entities in the function. This can be other
//! parts of the function like extended basic blocks or stack slots, or it can be external entities
//! parts of the function like basic blocks or stack slots, or it can be external entities
//! that are declared in the function preamble in the text format.
//!
//! These entity references in instruction operands are not implemented as Rust references both
@@ -25,20 +25,19 @@ use core::u32;
#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};
/// An opaque reference to an [extended basic
/// block](https://en.wikipedia.org/wiki/Extended_basic_block) in a
/// An opaque reference to a [basic block](https://en.wikipedia.org/wiki/Basic_block) in a
/// [`Function`](super::function::Function).
///
/// You can get an `Ebb` using
/// [`FunctionBuilder::create_ebb`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_ebb)
/// You can get a `Block` using
/// [`FunctionBuilder::create_block`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_block)
///
/// While the order is stable, it is arbitrary and does not necessarily resemble the layout order.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Ebb(u32);
entity_impl!(Ebb, "ebb");
pub struct Block(u32);
entity_impl!(Block, "block");
impl Ebb {
/// Create a new EBB reference from its number. This corresponds to the `ebbNN` representation.
impl Block {
/// Create a new block reference from its number. This corresponds to the `blockNN` representation.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Self> {
@@ -371,8 +370,8 @@ impl Table {
pub enum AnyEntity {
/// The whole function.
Function,
/// An extended basic block.
Ebb(Ebb),
/// A basic block.
Block(Block),
/// An instruction.
Inst(Inst),
/// An SSA value.
@@ -397,7 +396,7 @@ impl fmt::Display for AnyEntity {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Self::Function => write!(f, "function"),
Self::Ebb(r) => r.fmt(f),
Self::Block(r) => r.fmt(f),
Self::Inst(r) => r.fmt(f),
Self::Value(r) => r.fmt(f),
Self::StackSlot(r) => r.fmt(f),
@@ -417,9 +416,9 @@ impl fmt::Debug for AnyEntity {
}
}
impl From<Ebb> for AnyEntity {
fn from(r: Ebb) -> Self {
Self::Ebb(r)
impl From<Block> for AnyEntity {
fn from(r: Block) -> Self {
Self::Block(r)
}
}


@@ -32,8 +32,8 @@ pub enum ExternalName {
/// Arbitrary.
index: u32,
},
/// A test case function name of up to 10 ascii characters. This is
/// not intended to be used outside test cases.
/// A test case function name of up to a hardcoded number of ASCII
/// characters. This is not intended to be used outside test cases.
TestCase {
/// How many of the bytes in `ascii` are valid?
length: u8,


@@ -1,17 +1,17 @@
//! Intermediate representation of a function.
//!
//! The `Function` struct defined in this module owns all of its extended basic blocks and
//! The `Function` struct defined in this module owns all of its basic blocks and
//! instructions.
use crate::binemit::CodeOffset;
use crate::entity::{PrimaryMap, SecondaryMap};
use crate::ir;
use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature};
use crate::ir::{
Ebb, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, HeapData, Inst, JumpTable,
Block, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, HeapData, Inst, JumpTable,
JumpTableData, Opcode, SigRef, StackSlot, StackSlotData, Table, TableData,
};
use crate::ir::{EbbOffsets, FrameLayout, InstEncodings, SourceLocs, StackSlots, ValueLocations};
use crate::ir::{BlockOffsets, FrameLayout, InstEncodings, SourceLocs, StackSlots, ValueLocations};
use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature};
use crate::ir::{JumpTableOffsets, JumpTables};
use crate::isa::{CallConv, EncInfo, Encoding, Legalize, TargetIsa};
use crate::regalloc::{EntryRegDiversions, RegDiversions};
@@ -50,10 +50,10 @@ pub struct Function {
/// Jump tables used in this function.
pub jump_tables: JumpTables,
/// Data flow graph containing the primary definition of all instructions, EBBs and values.
/// Data flow graph containing the primary definition of all instructions, blocks and values.
pub dfg: DataFlowGraph,
/// Layout of EBBs and instructions in the function body.
/// Layout of blocks and instructions in the function body.
pub layout: Layout,
/// Encoding recipe and bits for the legal instructions.
@@ -69,12 +69,12 @@ pub struct Function {
/// ValueLocation. This field records these register-to-register moves as Diversions.
pub entry_diversions: EntryRegDiversions,
/// Code offsets of the EBB headers.
/// Code offsets of the block headers.
///
/// This information is only transiently available after the `binemit::relax_branches` function
/// computes it, and it can easily be recomputed by calling that function. It is not included
/// in the textual IR format.
pub offsets: EbbOffsets,
pub offsets: BlockOffsets,
/// Code offsets of Jump Table headers.
pub jt_offsets: JumpTableOffsets,
@@ -207,10 +207,10 @@ impl Function {
let entry = self.layout.entry_block().expect("Function is empty");
self.signature
.special_param_index(purpose)
.map(|i| self.dfg.ebb_params(entry)[i])
.map(|i| self.dfg.block_params(entry)[i])
}
/// Get an iterator over the instructions in `ebb`, including offsets and encoded instruction
/// Get an iterator over the instructions in `block`, including offsets and encoded instruction
/// sizes.
///
/// The iterator returns `(offset, inst, size)` tuples, where `offset` is the offset in bytes
@@ -219,20 +219,20 @@ impl Function {
///
/// This function can only be used after the code layout has been computed by the
/// `binemit::relax_branches()` function.
pub fn inst_offsets<'a>(&'a self, ebb: Ebb, encinfo: &EncInfo) -> InstOffsetIter<'a> {
pub fn inst_offsets<'a>(&'a self, block: Block, encinfo: &EncInfo) -> InstOffsetIter<'a> {
assert!(
!self.offsets.is_empty(),
"Code layout must be computed first"
);
let mut divert = RegDiversions::new();
divert.at_ebb(&self.entry_diversions, ebb);
divert.at_block(&self.entry_diversions, block);
InstOffsetIter {
encinfo: encinfo.clone(),
func: self,
divert,
encodings: &self.encodings,
offset: self.offsets[ebb],
iter: self.layout.ebb_insts(ebb),
offset: self.offsets[block],
iter: self.layout.block_insts(block),
}
}
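// Sketch (assumes `func: &Function`, `isa: &dyn TargetIsa`, and that
// `binemit::relax_branches` has already filled in `func.offsets`):
// dump the code offset and encoded size of every instruction.
let encinfo = isa.encoding_info();
for block in func.layout.blocks() {
    for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
        println!("{:#06x}: {} ({} bytes)", offset, func.dfg.display_inst(inst, None), size);
    }
}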
@@ -260,19 +260,19 @@ impl Function {
/// Changes the destination of a jump or branch instruction.
/// Does nothing if called with a non-jump or non-branch instruction.
pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Ebb) {
pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Block) {
match self.dfg[inst].branch_destination_mut() {
None => (),
Some(inst_dest) => *inst_dest = new_dest,
}
}
/// Checks that the specified EBB can be encoded as a basic block.
/// Checks that the specified block can be encoded as a basic block.
///
/// On error, returns the first invalid instruction and an error message.
pub fn is_ebb_basic(&self, ebb: Ebb) -> Result<(), (Inst, &'static str)> {
pub fn is_block_basic(&self, block: Block) -> Result<(), (Inst, &'static str)> {
let dfg = &self.dfg;
let inst_iter = self.layout.ebb_insts(ebb);
let inst_iter = self.layout.block_insts(block);
// Ignore all instructions prior to the first branch.
let mut inst_iter = inst_iter.skip_while(|&inst| !dfg[inst].opcode().is_branch());
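// Sketch: assert that every block in `func` is a plain basic block,
// e.g. after a pass that is expected to have split conditional branches.
for block in func.layout.blocks() {
    if let Err((inst, msg)) = func.is_block_basic(block) {
        panic!("{} in {}: {}", func.dfg.display_inst(inst, None), block, msg);
    }
}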


@@ -13,7 +13,7 @@ use core::str::FromStr;
use crate::ir;
use crate::ir::types;
use crate::ir::{Ebb, FuncRef, JumpTable, SigRef, Type, Value};
use crate::ir::{Block, FuncRef, JumpTable, SigRef, Type, Value};
use crate::isa;
use crate::bitset::BitSet;
@@ -164,7 +164,7 @@ impl Default for VariableArgs {
impl InstructionData {
/// Return information about the destination of a branch or jump instruction.
///
/// Any instruction that can transfer control to another EBB reveals its possible destinations
/// Any instruction that can transfer control to another block reveals its possible destinations
/// here.
pub fn analyze_branch<'a>(&'a self, pool: &'a ValueListPool) -> BranchInfo<'a> {
match *self {
@@ -208,7 +208,7 @@ impl InstructionData {
/// branch or jump.
///
/// Multi-destination branches like `br_table` return `None`.
pub fn branch_destination(&self) -> Option<Ebb> {
pub fn branch_destination(&self) -> Option<Block> {
match *self {
Self::Jump { destination, .. }
| Self::Branch { destination, .. }
@@ -227,7 +227,7 @@ impl InstructionData {
/// single destination branch or jump.
///
/// Multi-destination branches like `br_table` return `None`.
pub fn branch_destination_mut(&mut self) -> Option<&mut Ebb> {
pub fn branch_destination_mut(&mut self) -> Option<&mut Block> {
match *self {
Self::Jump {
ref mut destination,
@@ -279,15 +279,15 @@ impl InstructionData {
/// Information about branch and jump instructions.
pub enum BranchInfo<'a> {
/// This is not a branch or jump instruction.
/// This instruction will not transfer control to another EBB in the function, but it may still
/// This instruction will not transfer control to another block in the function, but it may still
/// affect control flow by returning or trapping.
NotABranch,
/// This is a branch or jump to a single destination EBB, possibly taking value arguments.
SingleDest(Ebb, &'a [Value]),
/// This is a branch or jump to a single destination block, possibly taking value arguments.
SingleDest(Block, &'a [Value]),
/// This is a jump table branch which can have many destination EBBs and maybe one default EBB.
Table(JumpTable, Option<Ebb>),
/// This is a jump table branch which can have many destination blocks and maybe one default block.
Table(JumpTable, Option<Block>),
}
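// Sketch: collect the possible successor blocks of `inst` by matching on
// `analyze_branch`; the value list pool lives in `func.dfg.value_lists`.
let mut succs: Vec<Block> = Vec::new();
match func.dfg[inst].analyze_branch(&func.dfg.value_lists) {
    BranchInfo::NotABranch => {}
    BranchInfo::SingleDest(dest, _args) => succs.push(dest),
    BranchInfo::Table(jt, default) => {
        succs.extend(func.jump_tables[jt].iter().cloned());
        succs.extend(default);
    }
}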
/// Information about call instructions.


@@ -3,7 +3,7 @@
//! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference.
//! The actual table of destinations is stored in a `JumpTableData` struct defined in this module.
use crate::ir::entities::Ebb;
use crate::ir::entities::Block;
use alloc::vec::Vec;
use core::fmt::{self, Display, Formatter};
use core::slice::{Iter, IterMut};
@@ -14,7 +14,7 @@ use core::slice::{Iter, IterMut};
#[derive(Clone)]
pub struct JumpTableData {
// Table entries.
table: Vec<Ebb>,
table: Vec<Block>,
}
impl JumpTableData {
@@ -36,32 +36,32 @@ impl JumpTableData {
}
/// Append a table entry.
pub fn push_entry(&mut self, dest: Ebb) {
pub fn push_entry(&mut self, dest: Block) {
self.table.push(dest)
}
/// Checks if any of the entries branch to `ebb`.
pub fn branches_to(&self, ebb: Ebb) -> bool {
self.table.iter().any(|target_ebb| *target_ebb == ebb)
/// Checks if any of the entries branch to `block`.
pub fn branches_to(&self, block: Block) -> bool {
self.table.iter().any(|target_block| *target_block == block)
}
/// Access the whole table as a slice.
pub fn as_slice(&self) -> &[Ebb] {
pub fn as_slice(&self) -> &[Block] {
self.table.as_slice()
}
/// Access the whole table as a mutable slice.
pub fn as_mut_slice(&mut self) -> &mut [Ebb] {
pub fn as_mut_slice(&mut self) -> &mut [Block] {
self.table.as_mut_slice()
}
/// Returns an iterator over the table.
pub fn iter(&self) -> Iter<Ebb> {
pub fn iter(&self) -> Iter<Block> {
self.table.iter()
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&mut self) -> IterMut<Ebb> {
pub fn iter_mut(&mut self) -> IterMut<Block> {
self.table.iter_mut()
}
}
@@ -73,8 +73,8 @@ impl Display for JumpTableData {
None => (),
Some(first) => write!(fmt, "{}", first)?,
}
for ebb in self.table.iter().skip(1) {
write!(fmt, ", {}", ebb)?;
for block in self.table.iter().skip(1) {
write!(fmt, ", {}", block)?;
}
write!(fmt, "]")
}
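// Usage sketch: a jump table is just an ordered list of `Block` targets
// (hypothetical indices; `Block::new` comes from the `EntityRef` trait).
let mut jt = JumpTableData::new();
jt.push_entry(Block::new(1));
jt.push_entry(Block::new(2));
assert!(jt.branches_to(Block::new(2)));
assert_eq!(jt.to_string(), "jump_table [block1, block2]");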
@@ -84,7 +84,7 @@ impl Display for JumpTableData {
mod tests {
use super::JumpTableData;
use crate::entity::EntityRef;
use crate::ir::Ebb;
use crate::ir::Block;
use alloc::string::ToString;
#[test]
@@ -102,8 +102,8 @@ mod tests {
#[test]
fn insert() {
let e1 = Ebb::new(1);
let e2 = Ebb::new(2);
let e1 = Block::new(1);
let e2 = Block::new(2);
let mut jt = JumpTableData::new();
@@ -111,7 +111,7 @@ mod tests {
jt.push_entry(e2);
jt.push_entry(e1);
assert_eq!(jt.to_string(), "jump_table [ebb1, ebb2, ebb1]");
assert_eq!(jt.to_string(), "jump_table [block1, block2, block1]");
let v = jt.as_slice();
assert_eq!(v, [e1, e2, e1]);

File diff suppressed because it is too large


@@ -33,7 +33,7 @@ pub use crate::ir::builder::{
pub use crate::ir::constant::{ConstantData, ConstantOffset, ConstantPool};
pub use crate::ir::dfg::{DataFlowGraph, ValueDef};
pub use crate::ir::entities::{
Constant, Ebb, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot,
Block, Constant, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot,
Table, Value,
};
pub use crate::ir::extfunc::{
@@ -73,8 +73,8 @@ pub type JumpTables = PrimaryMap<JumpTable, JumpTableData>;
/// Map of instruction encodings.
pub type InstEncodings = SecondaryMap<Inst, isa::Encoding>;
/// Code offsets for EBBs.
pub type EbbOffsets = SecondaryMap<Ebb, binemit::CodeOffset>;
/// Code offsets for blocks.
pub type BlockOffsets = SecondaryMap<Block, binemit::CodeOffset>;
/// Code offsets for Jump Tables.
pub type JumpTableOffsets = SecondaryMap<JumpTable, binemit::CodeOffset>;


@@ -1,7 +1,7 @@
//! Program points.
use crate::entity::EntityRef;
use crate::ir::{Ebb, Inst, ValueDef};
use crate::ir::{Block, Inst, ValueDef};
use core::cmp;
use core::fmt;
use core::u32;
@@ -10,7 +10,7 @@ use core::u32;
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
/// 2. A block header.
///
/// This corresponds more or less to the lines in the textual form of Cranelift IR.
#[derive(PartialEq, Eq, Clone, Copy)]
@@ -24,9 +24,9 @@ impl From<Inst> for ProgramPoint {
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> Self {
let idx = ebb.index();
impl From<Block> for ProgramPoint {
fn from(block: Block) -> Self {
let idx = block.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
Self((idx * 2 + 1) as u32)
}
@@ -36,7 +36,7 @@ impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> Self {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
ValueDef::Param(block, _) => block.into(),
}
}
}
@@ -47,8 +47,8 @@ impl From<ValueDef> for ProgramPoint {
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
/// A block header.
Block(Block),
}
impl ExpandedProgramPoint {
@@ -56,7 +56,7 @@ impl ExpandedProgramPoint {
pub fn unwrap_inst(self) -> Inst {
match self {
Self::Inst(x) => x,
Self::Ebb(x) => panic!("expected inst: {}", x),
Self::Block(x) => panic!("expected inst: {}", x),
}
}
}
@@ -67,9 +67,9 @@ impl From<Inst> for ExpandedProgramPoint {
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> Self {
Self::Ebb(ebb)
impl From<Block> for ExpandedProgramPoint {
fn from(block: Block) -> Self {
Self::Block(block)
}
}
@@ -77,7 +77,7 @@ impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> Self {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
ValueDef::Param(block, _) => block.into(),
}
}
}
@@ -87,7 +87,7 @@ impl From<ProgramPoint> for ExpandedProgramPoint {
if pp.0 & 1 == 0 {
Self::Inst(Inst::from_u32(pp.0 / 2))
} else {
Self::Ebb(Ebb::from_u32(pp.0 / 2))
Self::Block(Block::from_u32(pp.0 / 2))
}
}
}
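// Worked sketch of the parity encoding: Inst(5) encodes as the even
// value 10, Block(3) as the odd value 7, and decoding inspects the low bit.
let pp: ProgramPoint = Block::new(3).into();
match ExpandedProgramPoint::from(pp) {
    ExpandedProgramPoint::Block(b) => assert_eq!(b, Block::new(3)),
    ExpandedProgramPoint::Inst(i) => panic!("expected block: {}", i),
}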
@@ -96,7 +96,7 @@ impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Self::Inst(x) => write!(f, "{}", x),
Self::Ebb(x) => write!(f, "{}", x),
Self::Block(x) => write!(f, "{}", x),
}
}
}
@@ -129,7 +129,7 @@ pub trait ProgramOrder {
///
/// Return `Less` if `a` appears in the program before `b`.
///
/// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// This is declared as a generic such that it can be called with `Inst` and `Block` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
@@ -137,28 +137,28 @@ pub trait ProgramOrder {
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
/// Is the range from `inst` to `block` just the gap between consecutive blocks?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
/// This returns true if `inst` is the terminator in the block immediately before `block`.
fn is_block_gap(&self, inst: Inst, block: Block) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::entity::EntityRef;
use crate::ir::{Ebb, Inst};
use crate::ir::{Block, Inst};
use alloc::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let b3 = Block::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");
assert_eq!(pp2.to_string(), "block3");
}
}


@@ -95,7 +95,7 @@ pub struct RecipeConstraints {
/// If the instruction takes a variable number of operands, the register constraints for those
/// operands must be computed dynamically.
///
/// - For branches and jumps, EBB arguments must match the expectations of the destination EBB.
/// - For branches and jumps, block arguments must match the expectations of the destination block.
/// - For calls and returns, the calling convention ABI specifies constraints.
pub ins: &'static [OperandConstraint],
@@ -173,7 +173,7 @@ pub struct BranchRange {
impl BranchRange {
/// Determine if this branch range can represent the range from `branch` to `dest`, where
/// `branch` is the code offset of the branch instruction itself and `dest` is the code offset
/// of the destination EBB header.
/// of the destination block header.
///
/// This method does not detect if the range is larger than 2 GB.
pub fn contains(self, branch: CodeOffset, dest: CodeOffset) -> bool {


@@ -158,9 +158,9 @@ mod tests {
.finish(shared_flags);
let mut func = Function::new();
let ebb = func.dfg.make_ebb();
let arg64 = func.dfg.append_ebb_param(ebb, types::I64);
let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
let block = func.dfg.make_block();
let arg64 = func.dfg.append_block_param(block, types::I64);
let arg32 = func.dfg.append_block_param(block, types::I32);
// Try to encode iadd_imm.i64 v1, -10.
let inst64 = InstructionData::BinaryImm {
@@ -209,9 +209,9 @@ mod tests {
.finish(shared_flags);
let mut func = Function::new();
let ebb = func.dfg.make_ebb();
let arg64 = func.dfg.append_ebb_param(ebb, types::I64);
let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
let block = func.dfg.make_block();
let arg64 = func.dfg.append_block_param(block, types::I64);
let arg32 = func.dfg.append_block_param(block, types::I32);
// Try to encode iadd_imm.i64 v1, -10.
let inst64 = InstructionData::BinaryImm {
@@ -268,8 +268,8 @@ mod tests {
let isa = isa_builder.finish(shared_flags);
let mut func = Function::new();
let ebb = func.dfg.make_ebb();
let arg32 = func.dfg.append_ebb_param(ebb, types::I32);
let block = func.dfg.make_block();
let arg32 = func.dfg.append_block_param(block, types::I32);
// Create an imul.i32 which is encodable in RV32M.
let mul32 = InstructionData::Binary {


@@ -419,8 +419,8 @@ fn callee_saved_gprs_used(isa: &dyn TargetIsa, func: &ir::Function) -> RegisterS
//
// TODO: Consider re-evaluating how regmove/regfill/regspill work and whether it's possible
// to avoid this step.
for ebb in &func.layout {
for inst in func.layout.ebb_insts(ebb) {
for block in &func.layout {
for inst in func.layout.block_insts(block) {
match func.dfg[inst] {
ir::instructions::InstructionData::RegMove { dst, .. }
| ir::instructions::InstructionData::RegFill { dst, .. } => {
@@ -551,8 +551,8 @@ fn fastcall_prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> C
}
// Set up the cursor and insert the prologue
let entry_ebb = func.layout.entry_block().expect("missing entry block");
let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_ebb);
let entry_block = func.layout.entry_block().expect("missing entry block");
let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_block);
let prologue_cfa_state =
insert_common_prologue(&mut pos, local_stack_size, reg_type, &csrs, isa);
@@ -612,8 +612,8 @@ fn system_v_prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> C
}
// Set up the cursor and insert the prologue
let entry_ebb = func.layout.entry_block().expect("missing entry block");
let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_ebb);
let entry_block = func.layout.entry_block().expect("missing entry block");
let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_block);
let prologue_cfa_state =
insert_common_prologue(&mut pos, local_stack_size, reg_type, &csrs, isa);
@@ -678,9 +678,9 @@ fn insert_common_prologue(
None
};
// Append param to entry EBB
let ebb = pos.current_ebb().expect("missing ebb under cursor");
let fp = pos.func.dfg.append_ebb_param(ebb, reg_type);
// Append param to entry block
let block = pos.current_block().expect("missing block under cursor");
let fp = pos.func.dfg.append_block_param(block, reg_type);
pos.func.locations[fp] = ir::ValueLoc::Reg(RU::rbp as RegUnit);
let push_fp_inst = pos.ins().x86_push(fp);
@@ -727,8 +727,8 @@ fn insert_common_prologue(
}
for reg in csrs.iter(GPR) {
// Append param to entry EBB
let csr_arg = pos.func.dfg.append_ebb_param(ebb, reg_type);
// Append param to entry block
let csr_arg = pos.func.dfg.append_block_param(block, reg_type);
// Assign it a location
pos.func.locations[csr_arg] = ir::ValueLoc::Reg(reg);
@@ -831,11 +831,11 @@ fn insert_common_epilogues(
isa: &dyn TargetIsa,
cfa_state: Option<CFAState>,
) {
while let Some(ebb) = pos.next_ebb() {
pos.goto_last_inst(ebb);
while let Some(block) = pos.next_block() {
pos.goto_last_inst(block);
if let Some(inst) = pos.current_inst() {
if pos.func.dfg[inst].opcode().is_return() {
let is_last = pos.func.layout.last_ebb() == Some(ebb);
let is_last = pos.func.layout.last_block() == Some(block);
insert_common_epilogue(
inst,
stack_size,


@@ -4,7 +4,7 @@ use super::enc_tables::{needs_offset, needs_sib_byte};
use super::registers::RU;
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::condcodes::{CondCode, FloatCC, IntCC};
use crate::ir::{Constant, Ebb, Function, Inst, InstructionData, JumpTable, Opcode, TrapCode};
use crate::ir::{Block, Constant, Function, Inst, InstructionData, JumpTable, Opcode, TrapCode};
use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef, TargetIsa};
use crate::regalloc::RegDiversions;
@@ -369,13 +369,13 @@ fn fcc2opc(cond: FloatCC) -> u16 {
}
/// Emit a single-byte branch displacement to `destination`.
fn disp1<CS: CodeSink + ?Sized>(destination: Ebb, func: &Function, sink: &mut CS) {
fn disp1<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 1);
sink.put1(delta as u8);
}
/// Emit a four-byte branch displacement to `destination`.
fn disp4<CS: CodeSink + ?Sized>(destination: Ebb, func: &Function, sink: &mut CS) {
fn disp4<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 4);
sink.put4(delta);
}


@@ -253,7 +253,7 @@ fn expand_sdivrem(
_ => panic!("Need sdiv/srem: {}", func.dfg.display_inst(inst, None)),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
@@ -297,17 +297,17 @@ fn expand_sdivrem(
return;
}
// EBB handling the nominal case.
let nominal = pos.func.dfg.make_ebb();
// Block handling the nominal case.
let nominal = pos.func.dfg.make_block();
// EBB handling the -1 divisor case.
let minus_one = pos.func.dfg.make_ebb();
// Block handling the -1 divisor case.
let minus_one = pos.func.dfg.make_block();
// Final EBB with one argument representing the final result value.
let done = pos.func.dfg.make_ebb();
// Final block with one argument representing the final result value.
let done = pos.func.dfg.make_block();
// Move the `inst` result value onto the `done` EBB.
pos.func.dfg.attach_ebb_param(done, result);
// Move the `inst` result value onto the `done` block.
pos.func.dfg.attach_block_param(done, result);
// Start by checking for a -1 divisor which needs to be handled specially.
let is_m1 = pos.ins().ifcmp_imm(y, -1);
@@ -316,14 +316,14 @@ fn expand_sdivrem(
// Now it is safe to execute the `x86_sdivmodx` instruction which will still trap on division
// by zero.
pos.insert_ebb(nominal);
pos.insert_block(nominal);
let xhi = pos.ins().sshr_imm(x, i64::from(ty.lane_bits()) - 1);
let (quot, rem) = pos.ins().x86_sdivmodx(x, xhi, y);
let divres = if is_srem { rem } else { quot };
pos.ins().jump(done, &[divres]);
// Now deal with the -1 divisor case.
pos.insert_ebb(minus_one);
pos.insert_block(minus_one);
let m1_result = if is_srem {
// x % -1 = 0.
pos.ins().iconst(ty, 0)
@@ -342,12 +342,12 @@ fn expand_sdivrem(
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, nominal);
cfg.recompute_ebb(pos.func, minus_one);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, nominal);
cfg.recompute_block(pos.func, minus_one);
cfg.recompute_block(pos.func, done);
}
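// Shape of the expanded CFG, sketched with hypothetical value names
// (the -1 arm for sdiv negates x; that detail falls outside this hunk):
//
//   old_block:  ifcmp_imm y, -1; branch to minus_one on equal, else nominal
//   nominal:    x86_sdivmodx (still traps on division by zero); jump done(q)
//   minus_one:  srem: constant 0, sdiv: -x;                      jump done(m)
//   done(r):    `r` is the original result, reattached as a block parameter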
/// Expand the `udiv` and `urem` instructions using `x86_udivmodx`.
@@ -421,7 +421,7 @@ fn expand_minmax(
} => (args[0], args[1], ir::Opcode::X86Fmax, ir::Opcode::Band),
_ => panic!("Expected fmin/fmax: {}", func.dfg.display_inst(inst, None)),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
// We need to handle the following conditions, depending on how x and y compare:
//
@@ -430,20 +430,20 @@ fn expand_minmax(
// fmin(0.0, -0.0) -> -0.0 and fmax(0.0, -0.0) -> 0.0.
// 3. UN: We need to produce a quiet NaN that is canonical if the inputs are canonical.
// EBB handling case 1) where operands are ordered but not equal.
let one_ebb = func.dfg.make_ebb();
// Block handling case 1) where operands are ordered but not equal.
let one_block = func.dfg.make_block();
// EBB handling case 3) where one operand is NaN.
let uno_ebb = func.dfg.make_ebb();
// Block handling case 3) where one operand is NaN.
let uno_block = func.dfg.make_block();
// EBB that handles the unordered or equal cases 2) and 3).
let ueq_ebb = func.dfg.make_ebb();
// Block that handles the unordered or equal cases 2) and 3).
let ueq_block = func.dfg.make_block();
// EBB handling case 2) where operands are ordered and equal.
let eq_ebb = func.dfg.make_ebb();
// Block handling case 2) where operands are ordered and equal.
let eq_block = func.dfg.make_block();
// Final EBB with one argument representing the final result value.
let done = func.dfg.make_ebb();
// Final block with one argument representing the final result value.
let done = func.dfg.make_block();
// The basic blocks are laid out to minimize branching for the common cases:
//
@@ -451,21 +451,21 @@ fn expand_minmax(
// 2) One branch taken.
// 3) Two branches taken, one jump.
// Move the `inst` result value onto the `done` EBB.
// Move the `inst` result value onto the `done` block.
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
func.dfg.clear_results(inst);
func.dfg.attach_ebb_param(done, result);
func.dfg.attach_block_param(done, result);
// Test for case 1) ordered and not equal.
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
let cmp_ueq = pos.ins().fcmp(FloatCC::UnorderedOrEqual, x, y);
pos.ins().brnz(cmp_ueq, ueq_ebb, &[]);
pos.ins().jump(one_ebb, &[]);
pos.ins().brnz(cmp_ueq, ueq_block, &[]);
pos.ins().jump(one_block, &[]);
// Handle the common ordered, not equal (LT|GT) case.
pos.insert_ebb(one_ebb);
pos.insert_block(one_block);
let one_inst = pos.ins().Binary(x86_opc, ty, x, y).0;
let one_result = pos.func.dfg.first_result(one_inst);
pos.ins().jump(done, &[one_result]);
@@ -473,21 +473,21 @@ fn expand_minmax(
// Case 3) Unordered.
// We know that at least one operand is a NaN that needs to be propagated. We simply use an
// `fadd` instruction which has the same NaN propagation semantics.
pos.insert_ebb(uno_ebb);
pos.insert_block(uno_block);
let uno_result = pos.ins().fadd(x, y);
pos.ins().jump(done, &[uno_result]);
// Case 2) or 3).
pos.insert_ebb(ueq_ebb);
pos.insert_block(ueq_block);
// Test for case 3) (UN) one value is NaN.
// TODO: When we get support for flag values, we can reuse the above comparison.
let cmp_uno = pos.ins().fcmp(FloatCC::Unordered, x, y);
pos.ins().brnz(cmp_uno, uno_ebb, &[]);
pos.ins().jump(eq_ebb, &[]);
pos.ins().brnz(cmp_uno, uno_block, &[]);
pos.ins().jump(eq_block, &[]);
// We are now in case 2) where x and y compare EQ.
// We need a bitwise operation to get the sign right.
pos.insert_ebb(eq_ebb);
pos.insert_block(eq_block);
let bw_inst = pos.ins().Binary(bitwise_opc, ty, x, y).0;
let bw_result = pos.func.dfg.first_result(bw_inst);
// This should become a fall-through for this second most common case.
@@ -496,14 +496,14 @@ fn expand_minmax(
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, one_ebb);
cfg.recompute_ebb(pos.func, uno_ebb);
cfg.recompute_ebb(pos.func, ueq_ebb);
cfg.recompute_ebb(pos.func, eq_ebb);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, one_block);
cfg.recompute_block(pos.func, uno_block);
cfg.recompute_block(pos.func, ueq_block);
cfg.recompute_block(pos.func, eq_block);
cfg.recompute_block(pos.func, done);
}
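// The case analysis, summarized (sketch; the bitwise opcode for fmin is
// inferred by symmetry with the fmax/band pairing shown above):
//
//   ordered, x != y -> x86_fmin / x86_fmax                     (one_block)
//   ordered, x == y -> band (fmax) / bor (fmin) of the bits,
//                      which picks the right sign for 0.0/-0.0 (eq_block)
//   unordered (NaN) -> fadd x, y, which propagates a quiet NaN (uno_block)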
/// x86 has no unsigned-to-float conversions. We handle the easy case of zero-extending i32 to
@@ -540,33 +540,33 @@ fn expand_fcvt_from_uint(
_ => unimplemented!(),
}
let old_ebb = pos.func.layout.pp_ebb(inst);
let old_block = pos.func.layout.pp_block(inst);
// EBB handling the case where x >= 0.
let poszero_ebb = pos.func.dfg.make_ebb();
// Block handling the case where x >= 0.
let poszero_block = pos.func.dfg.make_block();
// EBB handling the case where x < 0.
let neg_ebb = pos.func.dfg.make_ebb();
// Block handling the case where x < 0.
let neg_block = pos.func.dfg.make_block();
// Final EBB with one argument representing the final result value.
let done = pos.func.dfg.make_ebb();
// Final block with one argument representing the final result value.
let done = pos.func.dfg.make_block();
// Move the `inst` result value onto the `done` EBB.
// Move the `inst` result value onto the `done` block.
pos.func.dfg.clear_results(inst);
pos.func.dfg.attach_ebb_param(done, result);
pos.func.dfg.attach_block_param(done, result);
// If x as a signed int is not negative, we can use the existing `fcvt_from_sint` instruction.
let is_neg = pos.ins().icmp_imm(IntCC::SignedLessThan, x, 0);
pos.ins().brnz(is_neg, neg_ebb, &[]);
pos.ins().jump(poszero_ebb, &[]);
pos.ins().brnz(is_neg, neg_block, &[]);
pos.ins().jump(poszero_block, &[]);
// Easy case: just use a signed conversion.
pos.insert_ebb(poszero_ebb);
pos.insert_block(poszero_block);
let posres = pos.ins().fcvt_from_sint(ty, x);
pos.ins().jump(done, &[posres]);
// Now handle the negative case.
pos.insert_ebb(neg_ebb);
pos.insert_block(neg_block);
// Divide x by two to get it in range for the signed conversion, keep the LSB, and scale it
// back up on the FP side.
@@ -581,12 +581,12 @@ fn expand_fcvt_from_uint(
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, poszero_ebb);
cfg.recompute_ebb(pos.func, neg_ebb);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, poszero_block);
cfg.recompute_block(pos.func, neg_block);
cfg.recompute_block(pos.func, done);
}
fn expand_fcvt_to_sint(
@@ -604,16 +604,16 @@ fn expand_fcvt_to_sint(
} => arg,
_ => panic!("Need fcvt_to_sint: {}", func.dfg.display_inst(inst, None)),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
let xty = func.dfg.value_type(x);
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
// Final EBB after the bad value checks.
let done = func.dfg.make_ebb();
// Final block after the bad value checks.
let done = func.dfg.make_block();
// EBB for checking failure cases.
let maybe_trap_ebb = func.dfg.make_ebb();
// Block for checking failure cases.
let maybe_trap_block = func.dfg.make_block();
// The `x86_cvtt2si` performs the desired conversion, but it doesn't trap on NaN or overflow.
// It produces an INT_MIN result instead.
@@ -626,7 +626,7 @@ fn expand_fcvt_to_sint(
.ins()
.icmp_imm(IntCC::NotEqual, result, 1 << (ty.lane_bits() - 1));
pos.ins().brnz(is_done, done, &[]);
pos.ins().jump(maybe_trap_ebb, &[]);
pos.ins().jump(maybe_trap_block, &[]);
// We now have the following possibilities:
//
@@ -634,7 +634,7 @@ fn expand_fcvt_to_sint(
// 2. The input was NaN -> trap bad_toint
// 3. The input was out of range -> trap int_ovf
//
pos.insert_ebb(maybe_trap_ebb);
pos.insert_block(maybe_trap_block);
// Check for NaN.
let is_nan = pos.ins().fcmp(FloatCC::Unordered, x, x);
@@ -683,11 +683,11 @@ fn expand_fcvt_to_sint(
pos.ins().trapnz(overflow, ir::TrapCode::IntegerOverflow);
pos.ins().jump(done, &[]);
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, maybe_trap_ebb);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, maybe_trap_block);
cfg.recompute_block(pos.func, done);
}
fn expand_fcvt_to_sint_sat(
@@ -709,18 +709,18 @@ fn expand_fcvt_to_sint_sat(
),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
let xty = func.dfg.value_type(x);
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
// Final EBB after the bad value checks.
let done_ebb = func.dfg.make_ebb();
let intmin_ebb = func.dfg.make_ebb();
let minsat_ebb = func.dfg.make_ebb();
let maxsat_ebb = func.dfg.make_ebb();
// Final block after the bad value checks.
let done_block = func.dfg.make_block();
let intmin_block = func.dfg.make_block();
let minsat_block = func.dfg.make_block();
let maxsat_block = func.dfg.make_block();
func.dfg.clear_results(inst);
func.dfg.attach_ebb_param(done_ebb, result);
func.dfg.attach_block_param(done_block, result);
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
@@ -732,25 +732,25 @@ fn expand_fcvt_to_sint_sat(
let is_done = pos
.ins()
.icmp_imm(IntCC::NotEqual, cvtt2si, 1 << (ty.lane_bits() - 1));
pos.ins().brnz(is_done, done_ebb, &[cvtt2si]);
pos.ins().jump(intmin_ebb, &[]);
pos.ins().brnz(is_done, done_block, &[cvtt2si]);
pos.ins().jump(intmin_block, &[]);
// We now have the following possibilities:
//
// 1. INT_MIN was actually the correct conversion result.
// 2. The input was NaN -> replace the result value with 0.
// 3. The input was out of range -> saturate the result to the min/max value.
pos.insert_ebb(intmin_ebb);
pos.insert_block(intmin_block);
// Check for NaN, which is truncated to 0.
let zero = pos.ins().iconst(ty, 0);
let is_nan = pos.ins().fcmp(FloatCC::Unordered, x, x);
pos.ins().brnz(is_nan, done_ebb, &[zero]);
pos.ins().jump(minsat_ebb, &[]);
pos.ins().brnz(is_nan, done_block, &[zero]);
pos.ins().jump(minsat_block, &[]);
// Check for case 1: INT_MIN is the correct result.
// Determine the smallest floating point number that would convert to INT_MIN.
pos.insert_ebb(minsat_ebb);
pos.insert_block(minsat_block);
let mut overflow_cc = FloatCC::LessThan;
let output_bits = ty.lane_bits();
let flimit = match xty {
@@ -786,11 +786,11 @@ fn expand_fcvt_to_sint_sat(
_ => panic!("Don't know the min value for {}", ty),
};
let min_value = pos.ins().iconst(ty, min_imm);
pos.ins().brnz(overflow, done_ebb, &[min_value]);
pos.ins().jump(maxsat_ebb, &[]);
pos.ins().brnz(overflow, done_block, &[min_value]);
pos.ins().jump(maxsat_block, &[]);
// Finally, we could have a positive value that is too large.
pos.insert_ebb(maxsat_ebb);
pos.insert_block(maxsat_block);
let fzero = match xty {
ir::types::F32 => pos.ins().f32const(Ieee32::with_bits(0)),
ir::types::F64 => pos.ins().f64const(Ieee64::with_bits(0)),
@@ -805,20 +805,20 @@ fn expand_fcvt_to_sint_sat(
let max_value = pos.ins().iconst(ty, max_imm);
let overflow = pos.ins().fcmp(FloatCC::GreaterThanOrEqual, x, fzero);
pos.ins().brnz(overflow, done_ebb, &[max_value]);
pos.ins().brnz(overflow, done_block, &[max_value]);
// Recycle the original instruction.
pos.func.dfg.replace(inst).jump(done_ebb, &[cvtt2si]);
pos.func.dfg.replace(inst).jump(done_block, &[cvtt2si]);
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done_ebb);
pos.insert_block(done_block);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, intmin_ebb);
cfg.recompute_ebb(pos.func, minsat_ebb);
cfg.recompute_ebb(pos.func, maxsat_ebb);
cfg.recompute_ebb(pos.func, done_ebb);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, intmin_block);
cfg.recompute_block(pos.func, minsat_block);
cfg.recompute_block(pos.func, maxsat_block);
cfg.recompute_block(pos.func, done_block);
}
fn expand_fcvt_to_uint(
@@ -837,26 +837,26 @@ fn expand_fcvt_to_uint(
_ => panic!("Need fcvt_to_uint: {}", func.dfg.display_inst(inst, None)),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
let xty = func.dfg.value_type(x);
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
// EBB handle numbers < 2^(N-1).
let below_uint_max_ebb = func.dfg.make_ebb();
// Block handling numbers < 2^(N-1).
let below_uint_max_block = func.dfg.make_block();
// EBB handle numbers < 0.
let below_zero_ebb = func.dfg.make_ebb();
// Block handling numbers < 0.
let below_zero_block = func.dfg.make_block();
// EBB handling numbers >= 2^(N-1).
let large = func.dfg.make_ebb();
// Block handling numbers >= 2^(N-1).
let large = func.dfg.make_block();
// Final EBB after the bad value checks.
let done = func.dfg.make_ebb();
// Final block after the bad value checks.
let done = func.dfg.make_block();
// Move the `inst` result value onto the `done` EBB.
// Move the `inst` result value onto the `done` block.
func.dfg.clear_results(inst);
func.dfg.attach_ebb_param(done, result);
func.dfg.attach_block_param(done, result);
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
@@ -871,11 +871,11 @@ fn expand_fcvt_to_uint(
let is_large = pos.ins().ffcmp(x, pow2nm1);
pos.ins()
.brff(FloatCC::GreaterThanOrEqual, is_large, large, &[]);
pos.ins().jump(below_uint_max_ebb, &[]);
pos.ins().jump(below_uint_max_block, &[]);
// We need to generate a specific trap code when `x` is NaN, so reuse the flags from the
// previous comparison.
pos.insert_ebb(below_uint_max_ebb);
pos.insert_block(below_uint_max_block);
pos.ins().trapff(
FloatCC::Unordered,
is_large,
@@ -887,13 +887,13 @@ fn expand_fcvt_to_uint(
let is_neg = pos.ins().ifcmp_imm(sres, 0);
pos.ins()
.brif(IntCC::SignedGreaterThanOrEqual, is_neg, done, &[sres]);
pos.ins().jump(below_zero_ebb, &[]);
pos.ins().jump(below_zero_block, &[]);
pos.insert_ebb(below_zero_ebb);
pos.insert_block(below_zero_block);
pos.ins().trap(ir::TrapCode::IntegerOverflow);
// Handle the case where x >= 2^(N-1) and not NaN.
pos.insert_ebb(large);
pos.insert_block(large);
let adjx = pos.ins().fsub(x, pow2nm1);
let lres = pos.ins().x86_cvtt2si(ty, adjx);
let is_neg = pos.ins().ifcmp_imm(lres, 0);
@@ -906,13 +906,13 @@ fn expand_fcvt_to_uint(
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, below_uint_max_ebb);
cfg.recompute_ebb(pos.func, below_zero_ebb);
cfg.recompute_ebb(pos.func, large);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, below_uint_max_block);
cfg.recompute_block(pos.func, below_zero_block);
cfg.recompute_block(pos.func, large);
cfg.recompute_block(pos.func, done);
}
fn expand_fcvt_to_uint_sat(
@@ -934,27 +934,27 @@ fn expand_fcvt_to_uint_sat(
),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
let xty = func.dfg.value_type(x);
let result = func.dfg.first_result(inst);
let ty = func.dfg.value_type(result);
// EBB handle numbers < 2^(N-1).
let below_pow2nm1_or_nan_ebb = func.dfg.make_ebb();
let below_pow2nm1_ebb = func.dfg.make_ebb();
// Block handling numbers < 2^(N-1).
let below_pow2nm1_or_nan_block = func.dfg.make_block();
let below_pow2nm1_block = func.dfg.make_block();
// EBB handling numbers >= 2^(N-1).
let large = func.dfg.make_ebb();
// Block handling numbers >= 2^(N-1).
let large = func.dfg.make_block();
// EBB handling numbers < 2^N.
let uint_large_ebb = func.dfg.make_ebb();
// Block handling numbers < 2^N.
let uint_large_block = func.dfg.make_block();
// Final EBB after the bad value checks.
let done = func.dfg.make_ebb();
// Final block after the bad value checks.
let done = func.dfg.make_block();
// Move the `inst` result value onto the `done` EBB.
// Move the `inst` result value onto the `done` block.
func.dfg.clear_results(inst);
func.dfg.attach_ebb_param(done, result);
func.dfg.attach_block_param(done, result);
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
@@ -970,16 +970,16 @@ fn expand_fcvt_to_uint_sat(
let is_large = pos.ins().ffcmp(x, pow2nm1);
pos.ins()
.brff(FloatCC::GreaterThanOrEqual, is_large, large, &[]);
pos.ins().jump(below_pow2nm1_or_nan_ebb, &[]);
pos.ins().jump(below_pow2nm1_or_nan_block, &[]);
// We need to generate zero when `x` is NaN, so reuse the flags from the previous comparison.
pos.insert_ebb(below_pow2nm1_or_nan_ebb);
pos.insert_block(below_pow2nm1_or_nan_block);
pos.ins().brff(FloatCC::Unordered, is_large, done, &[zero]);
pos.ins().jump(below_pow2nm1_ebb, &[]);
pos.ins().jump(below_pow2nm1_block, &[]);
// Now we know that x < 2^(N-1) and not NaN. If the result of the cvtt2si is positive, we're
// done; otherwise saturate to the minimum unsigned value, that is 0.
pos.insert_ebb(below_pow2nm1_ebb);
pos.insert_block(below_pow2nm1_block);
let sres = pos.ins().x86_cvtt2si(ty, x);
let is_neg = pos.ins().ifcmp_imm(sres, 0);
pos.ins()
@@ -987,7 +987,7 @@ fn expand_fcvt_to_uint_sat(
pos.ins().jump(done, &[zero]);
// Handle the case where x >= 2^(N-1) and not NaN.
pos.insert_ebb(large);
pos.insert_block(large);
let adjx = pos.ins().fsub(x, pow2nm1);
let lres = pos.ins().x86_cvtt2si(ty, adjx);
let max_value = pos.ins().iconst(
@@ -1001,9 +1001,9 @@ fn expand_fcvt_to_uint_sat(
let is_neg = pos.ins().ifcmp_imm(lres, 0);
pos.ins()
.brif(IntCC::SignedLessThan, is_neg, done, &[max_value]);
pos.ins().jump(uint_large_ebb, &[]);
pos.ins().jump(uint_large_block, &[]);
pos.insert_ebb(uint_large_ebb);
pos.insert_block(uint_large_block);
let lfinal = pos.ins().iadd_imm(lres, 1 << (ty.lane_bits() - 1));
// Recycle the original instruction as a jump.
@@ -1011,14 +1011,14 @@ fn expand_fcvt_to_uint_sat(
// Finally insert a label for the completion.
pos.next_inst();
pos.insert_ebb(done);
pos.insert_block(done);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, below_pow2nm1_or_nan_ebb);
cfg.recompute_ebb(pos.func, below_pow2nm1_ebb);
cfg.recompute_ebb(pos.func, large);
cfg.recompute_ebb(pos.func, uint_large_ebb);
cfg.recompute_ebb(pos.func, done);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, below_pow2nm1_or_nan_block);
cfg.recompute_block(pos.func, below_pow2nm1_block);
cfg.recompute_block(pos.func, large);
cfg.recompute_block(pos.func, uint_large_block);
cfg.recompute_block(pos.func, done);
}
/// Convert shuffle instructions.


@@ -182,14 +182,14 @@ pub fn emit_fde(func: &Function, isa: &dyn TargetIsa, sink: &mut dyn FrameUnwind
assert!(func.frame_layout.is_some(), "expected func.frame_layout");
let frame_layout = func.frame_layout.as_ref().unwrap();
let mut ebbs = func.layout.ebbs().collect::<Vec<_>>();
ebbs.sort_by_key(|ebb| func.offsets[*ebb]); // Ensure inst offsets always increase
let mut blocks = func.layout.blocks().collect::<Vec<_>>();
blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
let encinfo = isa.encoding_info();
let mut last_offset = 0;
let mut changes = Vec::new();
for ebb in ebbs {
for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) {
for block in blocks {
for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
let address_offset = (offset + size) as usize;
assert!(last_offset <= address_offset);
if let Some(cmds) = frame_layout.instructions.get(&inst) {
@@ -343,9 +343,9 @@ mod tests {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let ebb0 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
@@ -411,20 +411,20 @@ mod tests {
sig.params.push(AbiParam::new(types::I32));
let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
let ebb0 = func.dfg.make_ebb();
let v0 = func.dfg.append_ebb_param(ebb0, types::I32);
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.ins().brnz(v0, ebb2, &[]);
pos.ins().jump(ebb1, &[]);
pos.insert_block(block0);
pos.ins().brnz(v0, block2, &[]);
pos.ins().jump(block1, &[]);
pos.insert_ebb(ebb1);
pos.insert_block(block1);
pos.ins().return_(&[]);
pos.insert_ebb(ebb2);
pos.insert_block(block2);
pos.ins().trap(TrapCode::User(0));
func

View File

@@ -127,7 +127,7 @@ impl UnwindInfo {
}
let prologue_end = func.prologue_end.unwrap();
let entry_block = func.layout.ebbs().nth(0).expect("missing entry block");
let entry_block = func.layout.blocks().nth(0).expect("missing entry block");
// Stores the stack size when SP is not adjusted via an immediate value
let mut stack_size = None;
@@ -519,9 +519,9 @@ mod tests {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let ebb0 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {

View File

@@ -22,7 +22,7 @@ use crate::cursor::{Cursor, FuncCursor};
use crate::flowgraph::ControlFlowGraph;
use crate::ir::instructions::CallInfo;
use crate::ir::{
AbiParam, ArgumentLoc, ArgumentPurpose, DataFlowGraph, Ebb, Function, Inst, InstBuilder,
AbiParam, ArgumentLoc, ArgumentPurpose, Block, DataFlowGraph, Function, Inst, InstBuilder,
MemFlags, SigRef, Signature, StackSlotData, StackSlotKind, Type, Value, ValueLoc,
};
use crate::isa::TargetIsa;
@@ -84,12 +84,12 @@ fn legalize_signature(
/// Legalize the entry block parameters after `func`'s signature has been legalized.
///
/// The legalized signature may contain more parameters than the original signature, and the
/// parameter types have been changed. This function goes through the parameters of the entry EBB
/// parameter types have been changed. This function goes through the parameters of the entry block
/// and replaces them with parameters of the right type for the ABI.
///
/// The original entry EBB parameters are computed from the new ABI parameters by code inserted at
/// The original entry block parameters are computed from the new ABI parameters by code inserted at
/// the top of the entry block.
fn legalize_entry_params(func: &mut Function, entry: Ebb) {
fn legalize_entry_params(func: &mut Function, entry: Block) {
let mut has_sret = false;
let mut has_link = false;
let mut has_vmctx = false;
@@ -104,19 +104,19 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
// Keep track of the argument types in the ABI-legalized signature.
let mut abi_arg = 0;
// Process the EBB parameters one at a time, possibly replacing one argument with multiple new
// ones. We do this by detaching the entry EBB parameters first.
let ebb_params = pos.func.dfg.detach_ebb_params(entry);
// Process the block parameters one at a time, possibly replacing one argument with multiple new
// ones. We do this by detaching the entry block parameters first.
let block_params = pos.func.dfg.detach_block_params(entry);
let mut old_arg = 0;
while let Some(arg) = ebb_params.get(old_arg, &pos.func.dfg.value_lists) {
while let Some(arg) = block_params.get(old_arg, &pos.func.dfg.value_lists) {
old_arg += 1;
let abi_type = pos.func.signature.params[abi_arg];
let arg_type = pos.func.dfg.value_type(arg);
if arg_type == abi_type.value_type {
// No value translation is necessary, this argument matches the ABI type.
// Just use the original EBB argument value. This is the most common case.
pos.func.dfg.attach_ebb_param(entry, arg);
// Just use the original block argument value. This is the most common case.
pos.func.dfg.attach_block_param(entry, arg);
match abi_type.purpose {
ArgumentPurpose::Normal => {}
ArgumentPurpose::FramePointer => {}
@@ -151,13 +151,13 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
);
if ty == abi_type.value_type {
abi_arg += 1;
Ok(func.dfg.append_ebb_param(entry, ty))
Ok(func.dfg.append_block_param(entry, ty))
} else {
Err(abi_type)
}
};
let converted = convert_from_abi(&mut pos, arg_type, Some(arg), &mut get_arg);
// The old `arg` is no longer an attached EBB argument, but there are probably still
// The old `arg` is no longer an attached block argument, but there are probably still
// uses of the value.
debug_assert_eq!(pos.func.dfg.resolve_aliases(arg), converted);
}
@@ -201,7 +201,7 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
// Just create entry block values to match here. We will use them in `handle_return_abi()`
// below.
pos.func.dfg.append_ebb_param(entry, arg.value_type);
pos.func.dfg.append_block_param(entry, arg.value_type);
}
}
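Condensed, the detach/re-attach pattern used by `legalize_entry_params` looks like the sketch below. ABI-type bookkeeping and the special argument purposes are elided, and the one-to-one mapping from old parameters to ABI parameters is an assumption made for brevity.
let block_params = pos.func.dfg.detach_block_params(entry);
let mut old_arg = 0;
while let Some(arg) = block_params.get(old_arg, &pos.func.dfg.value_lists) {
    old_arg += 1;
    let abi_type = pos.func.signature.params[old_arg - 1]; // assumed 1:1 mapping
    if pos.func.dfg.value_type(arg) == abi_type.value_type {
        // No translation needed: reuse the original value as-is.
        pos.func.dfg.attach_block_param(entry, arg);
    } else {
        let _new_arg = pos.func.dfg.append_block_param(entry, abi_type.value_type);
        // ...insert conversion code so the old `arg` is computed from the new parameter...
    }
}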
@@ -851,7 +851,7 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
let val = pos
.func
.dfg
.ebb_params(pos.func.layout.entry_block().unwrap())[idx];
.block_params(pos.func.layout.entry_block().unwrap())[idx];
debug_assert_eq!(pos.func.dfg.value_type(val), arg.value_type);
vlist.push(val, &mut pos.func.dfg.value_lists);
@@ -958,8 +958,13 @@ fn round_up_to_multiple_of_pow2(n: u32, to: u32) -> u32 {
///
/// Values that are passed into the function on the stack must be assigned to an `IncomingArg`
/// stack slot already during legalization.
fn spill_entry_params(func: &mut Function, entry: Ebb) {
for (abi, &arg) in func.signature.params.iter().zip(func.dfg.ebb_params(entry)) {
fn spill_entry_params(func: &mut Function, entry: Block) {
for (abi, &arg) in func
.signature
.params
.iter()
.zip(func.dfg.block_params(entry))
{
if let ArgumentLoc::Stack(offset) = abi.location {
let ss = func.stack_slots.make_incoming_arg(abi.value_type, offset);
func.locations[arg] = ValueLoc::Stack(ss);

View File

@@ -120,12 +120,12 @@ fn static_addr(
pos.ins().trap(ir::TrapCode::HeapOutOfBounds);
pos.func.dfg.replace(inst).iconst(addr_ty, 0);
// Split Ebb, as the trap is a terminator instruction.
let curr_ebb = pos.current_ebb().expect("Cursor is not in an ebb");
let new_ebb = pos.func.dfg.make_ebb();
pos.insert_ebb(new_ebb);
cfg.recompute_ebb(pos.func, curr_ebb);
cfg.recompute_ebb(pos.func, new_ebb);
// Split Block, as the trap is a terminator instruction.
let curr_block = pos.current_block().expect("Cursor is not in a block");
let new_block = pos.func.dfg.make_block();
pos.insert_block(new_block);
cfg.recompute_block(pos.func, curr_block);
cfg.recompute_block(pos.func, new_block);
return;
}
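The same split-after-terminator pattern recurs in several legalizations (including the fcvt expansions earlier). As a hypothetical helper, not present in the codebase, it would read:
fn split_after_terminator(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph) {
    // After emitting a terminator mid-block, open a fresh block and
    // recompute the CFG for both halves.
    let curr_block = pos.current_block().expect("cursor is not in a block");
    let new_block = pos.func.dfg.make_block();
    pos.insert_block(new_block);
    cfg.recompute_block(pos.func, curr_block);
    cfg.recompute_block(pos.func, new_block);
}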

View File

@@ -87,7 +87,7 @@ fn legalize_inst(
return LegalizeInstResult::SplitLegalizePending;
}
}
ir::ValueDef::Param(_ebb, _num) => {}
ir::ValueDef::Param(_block, _num) => {}
}
let res = pos.func.dfg.inst_results(inst).to_vec();
@@ -148,10 +148,10 @@ pub fn legalize_function(func: &mut ir::Function, cfg: &mut ControlFlowGraph, is
let mut pos = FuncCursor::new(func);
let func_begin = pos.position();
// Split ebb params before trying to legalize instructions, so that the newly introduced
// Split block params before trying to legalize instructions, so that the newly introduced
// isplit instructions get legalized.
while let Some(ebb) = pos.next_ebb() {
split::split_ebb_params(pos.func, cfg, ebb);
while let Some(block) = pos.next_block() {
split::split_block_params(pos.func, cfg, block);
}
pos.set_position(func_begin);
@@ -159,9 +159,9 @@ pub fn legalize_function(func: &mut ir::Function, cfg: &mut ControlFlowGraph, is
// This must be a set to prevent trying to legalize `isplit` and `vsplit` twice in certain cases.
let mut pending_splits = BTreeSet::new();
// Process EBBs in layout order. Some legalization actions may split the current EBB or append
// new ones to the end. We need to make sure we visit those new EBBs too.
while let Some(_ebb) = pos.next_ebb() {
// Process blocks in layout order. Some legalization actions may split the current block or append
// new ones to the end. We need to make sure we visit those new blocks too.
while let Some(_block) = pos.next_block() {
// Keep track of the cursor position before the instruction being processed, so we can
// double back when replacing instructions.
let mut prev_pos = pos.position();
@@ -225,48 +225,48 @@ fn expand_cond_trap(
_ => panic!("Expected cond trap: {}", func.dfg.display_inst(inst, None)),
};
// Split the EBB after `inst`:
// Split the block after `inst`:
//
// trapnz arg
// ..
//
// Becomes:
//
// brz arg, new_ebb_resume
// jump new_ebb_trap
// brz arg, new_block_resume
// jump new_block_trap
//
// new_ebb_trap:
// new_block_trap:
// trap
//
// new_ebb_resume:
// new_block_resume:
// ..
let old_ebb = func.layout.pp_ebb(inst);
let new_ebb_trap = func.dfg.make_ebb();
let new_ebb_resume = func.dfg.make_ebb();
let old_block = func.layout.pp_block(inst);
let new_block_trap = func.dfg.make_block();
let new_block_resume = func.dfg.make_block();
// Replace trap instruction by the inverted condition.
if trapz {
func.dfg.replace(inst).brnz(arg, new_ebb_resume, &[]);
func.dfg.replace(inst).brnz(arg, new_block_resume, &[]);
} else {
func.dfg.replace(inst).brz(arg, new_ebb_resume, &[]);
func.dfg.replace(inst).brz(arg, new_block_resume, &[]);
}
// Add jump instruction after the inverted branch.
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);
pos.ins().jump(new_ebb_trap, &[]);
pos.ins().jump(new_block_trap, &[]);
// Insert the new label and the unconditional trap terminator.
pos.insert_ebb(new_ebb_trap);
pos.insert_block(new_block_trap);
pos.ins().trap(code);
// Insert the new label and resume the execution when the trap fails.
pos.insert_ebb(new_ebb_resume);
pos.insert_block(new_block_resume);
// Finally update the CFG.
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_ebb(pos.func, new_ebb_resume);
cfg.recompute_ebb(pos.func, new_ebb_trap);
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, new_block_resume);
cfg.recompute_block(pos.func, new_block_trap);
}
/// Jump tables.
@@ -292,7 +292,7 @@ fn expand_br_table_jt(
) {
use crate::ir::condcodes::IntCC;
let (arg, default_ebb, table) = match func.dfg[inst] {
let (arg, default_block, table) = match func.dfg[inst] {
ir::InstructionData::BranchTable {
opcode: ir::Opcode::BrTable,
arg,
@@ -304,22 +304,22 @@ fn expand_br_table_jt(
// Rewrite:
//
// br_table $idx, default_ebb, $jt
// br_table $idx, default_block, $jt
//
// To:
//
// $oob = ifcmp_imm $idx, len($jt)
// brif uge $oob, default_ebb
// jump fallthrough_ebb
// brif uge $oob, default_block
// jump fallthrough_block
//
// fallthrough_ebb:
// fallthrough_block:
// $base = jump_table_base.i64 $jt
// $rel_addr = jump_table_entry.i64 $idx, $base, 4, $jt
// $addr = iadd $base, $rel_addr
// indirect_jump_table_br $addr, $jt
let ebb = func.layout.pp_ebb(inst);
let jump_table_ebb = func.dfg.make_ebb();
let block = func.layout.pp_block(inst);
let jump_table_block = func.dfg.make_block();
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
@@ -330,9 +330,9 @@ fn expand_br_table_jt(
.ins()
.icmp_imm(IntCC::UnsignedGreaterThanOrEqual, arg, table_size);
pos.ins().brnz(oob, default_ebb, &[]);
pos.ins().jump(jump_table_ebb, &[]);
pos.insert_ebb(jump_table_ebb);
pos.ins().brnz(oob, default_block, &[]);
pos.ins().jump(jump_table_block, &[]);
pos.insert_block(jump_table_block);
let addr_ty = isa.pointer_type();
@@ -351,8 +351,8 @@ fn expand_br_table_jt(
pos.ins().indirect_jump_table_br(addr, table);
pos.remove_inst();
cfg.recompute_ebb(pos.func, ebb);
cfg.recompute_ebb(pos.func, jump_table_ebb);
cfg.recompute_block(pos.func, block);
cfg.recompute_block(pos.func, jump_table_block);
}
/// Expand br_table to series of conditionals.
@@ -364,7 +364,7 @@ fn expand_br_table_conds(
) {
use crate::ir::condcodes::IntCC;
let (arg, default_ebb, table) = match func.dfg[inst] {
let (arg, default_block, table) = match func.dfg[inst] {
ir::InstructionData::BranchTable {
opcode: ir::Opcode::BrTable,
arg,
@@ -374,15 +374,15 @@ fn expand_br_table_conds(
_ => panic!("Expected br_table: {}", func.dfg.display_inst(inst, None)),
};
let ebb = func.layout.pp_ebb(inst);
let block = func.layout.pp_block(inst);
// This is a poor man's jump table using just a sequence of conditional branches.
let table_size = func.jump_tables[table].len();
let mut cond_failed_ebb = vec![];
let mut cond_failed_block = vec![];
if table_size >= 1 {
cond_failed_ebb = alloc::vec::Vec::with_capacity(table_size - 1);
cond_failed_block = alloc::vec::Vec::with_capacity(table_size - 1);
for _ in 0..table_size - 1 {
cond_failed_ebb.push(func.dfg.make_ebb());
cond_failed_block.push(func.dfg.make_block());
}
}
@@ -397,19 +397,19 @@ fn expand_br_table_conds(
pos.ins().brnz(t, dest, &[]);
// Jump to the next case.
if i < table_size - 1 {
let ebb = cond_failed_ebb[i];
pos.ins().jump(ebb, &[]);
pos.insert_ebb(ebb);
let block = cond_failed_block[i];
pos.ins().jump(block, &[]);
pos.insert_block(block);
}
}
// `br_table` jumps to the default destination if nothing matches
pos.ins().jump(default_ebb, &[]);
pos.ins().jump(default_block, &[]);
pos.remove_inst();
cfg.recompute_ebb(pos.func, ebb);
for failed_ebb in cond_failed_ebb.into_iter() {
cfg.recompute_ebb(pos.func, failed_ebb);
cfg.recompute_block(pos.func, block);
for failed_block in cond_failed_block.into_iter() {
cfg.recompute_block(pos.func, failed_block);
}
}
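For a three-entry table, the chain built above comes out roughly as follows. This is an illustrative sketch: `arg`, `dest0`..`dest2`, and the equality compares producing each test value are assumed from the elided context of the hunk.
let t0 = pos.ins().icmp_imm(IntCC::Equal, arg, 0);
pos.ins().brnz(t0, dest0, &[]);
pos.ins().jump(cond_failed_block[0], &[]);
pos.insert_block(cond_failed_block[0]);
let t1 = pos.ins().icmp_imm(IntCC::Equal, arg, 1);
pos.ins().brnz(t1, dest1, &[]);
pos.ins().jump(cond_failed_block[1], &[]);
pos.insert_block(cond_failed_block[1]);
let t2 = pos.ins().icmp_imm(IntCC::Equal, arg, 2);
pos.ins().brnz(t2, dest2, &[]);
pos.ins().jump(default_block, &[]); // nothing matched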
@@ -433,23 +433,23 @@ fn expand_select(
// Replace `result = select ctrl, tval, fval` with:
//
// brnz ctrl, new_ebb(tval)
// jump new_ebb(fval)
// new_ebb(result):
let old_ebb = func.layout.pp_ebb(inst);
// brnz ctrl, new_block(tval)
// jump new_block(fval)
// new_block(result):
let old_block = func.layout.pp_block(inst);
let result = func.dfg.first_result(inst);
func.dfg.clear_results(inst);
let new_ebb = func.dfg.make_ebb();
func.dfg.attach_ebb_param(new_ebb, result);
let new_block = func.dfg.make_block();
func.dfg.attach_block_param(new_block, result);
func.dfg.replace(inst).brnz(ctrl, new_ebb, &[tval]);
func.dfg.replace(inst).brnz(ctrl, new_block, &[tval]);
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);
pos.ins().jump(new_ebb, &[fval]);
pos.insert_ebb(new_ebb);
pos.ins().jump(new_block, &[fval]);
pos.insert_block(new_block);
cfg.recompute_ebb(pos.func, new_ebb);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_block(pos.func, new_block);
cfg.recompute_block(pos.func, old_block);
}
fn expand_br_icmp(
@@ -458,7 +458,7 @@ fn expand_br_icmp(
cfg: &mut ControlFlowGraph,
_isa: &dyn TargetIsa,
) {
let (cond, a, b, destination, ebb_args) = match func.dfg[inst] {
let (cond, a, b, destination, block_args) = match func.dfg[inst] {
ir::InstructionData::BranchIcmp {
cond,
destination,
@@ -474,16 +474,16 @@ fn expand_br_icmp(
_ => panic!("Expected br_icmp {}", func.dfg.display_inst(inst, None)),
};
let old_ebb = func.layout.pp_ebb(inst);
let old_block = func.layout.pp_block(inst);
func.dfg.clear_results(inst);
let icmp_res = func.dfg.replace(inst).icmp(cond, a, b);
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);
pos.ins().brnz(icmp_res, destination, &ebb_args);
pos.ins().brnz(icmp_res, destination, &block_args);
cfg.recompute_ebb(pos.func, destination);
cfg.recompute_ebb(pos.func, old_ebb);
cfg.recompute_block(pos.func, destination);
cfg.recompute_block(pos.func, old_block);
}
/// Expand illegal `f32const` and `f64const` instructions.

View File

@@ -54,19 +54,19 @@
//! This means that the `iconcat` instructions defining `v1` and `v4` end up with no uses, so they
//! can be trivially deleted by a dead code elimination pass.
//!
//! # EBB arguments
//! # Block arguments
//!
//! If all instructions that produce an `i64` value are legalized as above, we will eventually end
//! up with no `i64` values anywhere, except for EBB arguments. We can work around this by
//! iteratively splitting EBB arguments too. That should leave us with no illegal value types
//! up with no `i64` values anywhere, except for block arguments. We can work around this by
//! iteratively splitting block arguments too. That should leave us with no illegal value types
//! anywhere.
//!
//! It is possible to have circular dependencies of EBB arguments that are never used by any real
//! It is possible to have circular dependencies of block arguments that are never used by any real
//! instructions. These loops will remain in the program.
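At the builder level, one split step looks like this short sketch (value names are illustrative):
let (lo, hi) = pos.ins().isplit(v1); // v1: i64 -> two i32 halves
// ...legal 32-bit instructions now operate on `lo` and `hi`...
let v1_again = pos.ins().iconcat(lo, hi); // becomes dead once no i64 use remains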
use crate::cursor::{Cursor, CursorPosition, FuncCursor};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::ir::{self, Ebb, Inst, InstBuilder, InstructionData, Opcode, Type, Value, ValueDef};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::{self, Block, Inst, InstBuilder, InstructionData, Opcode, Type, Value, ValueDef};
use alloc::vec::Vec;
use core::iter;
use smallvec::SmallVec;
@@ -95,7 +95,7 @@ pub fn vsplit(
split_any(func, cfg, pos, srcloc, value, Opcode::Vconcat)
}
/// After splitting an EBB argument, we need to go back and fix up all of the predecessor
/// After splitting a block argument, we need to go back and fix up all of the predecessor
/// instructions. This is potentially a recursive operation, but we don't implement it recursively
/// since that could use up too much stack.
///
@@ -104,11 +104,11 @@ struct Repair {
concat: Opcode,
// The argument type after splitting.
split_type: Type,
// The destination EBB whose arguments have been split.
ebb: Ebb,
// Number of the original EBB argument which has been replaced by the low part.
// The destination block whose arguments have been split.
block: Block,
// Number of the original block argument which has been replaced by the low part.
num: usize,
// Number of the new EBB argument which represents the high part after the split.
// Number of the new block argument which represents the high part after the split.
hi_num: usize,
}
@@ -130,9 +130,9 @@ fn split_any(
result
}
pub fn split_ebb_params(func: &mut ir::Function, cfg: &ControlFlowGraph, ebb: Ebb) {
let pos = &mut FuncCursor::new(func).at_top(ebb);
let ebb_params = pos.func.dfg.ebb_params(ebb);
pub fn split_block_params(func: &mut ir::Function, cfg: &ControlFlowGraph, block: Block) {
let pos = &mut FuncCursor::new(func).at_top(block);
let block_params = pos.func.dfg.block_params(block);
// Add further splittable types here.
fn type_requires_splitting(ty: Type) -> bool {
@@ -140,31 +140,31 @@ pub fn split_ebb_params(func: &mut ir::Function, cfg: &ControlFlowGraph, ebb: Eb
}
// A shortcut. If none of the param types require splitting, exit now. This helps because
// the loop below necessarily has to copy the ebb params into a new vector, so it's better to
// the loop below necessarily has to copy the block params into a new vector, so it's better to
// avoid doing so when possible.
if !ebb_params
if !block_params
.iter()
.any(|ebb_param| type_requires_splitting(pos.func.dfg.value_type(*ebb_param)))
.any(|block_param| type_requires_splitting(pos.func.dfg.value_type(*block_param)))
{
return;
}
let mut repairs = Vec::new();
for (num, ebb_param) in ebb_params.to_vec().into_iter().enumerate() {
if !type_requires_splitting(pos.func.dfg.value_type(ebb_param)) {
for (num, block_param) in block_params.to_vec().into_iter().enumerate() {
if !type_requires_splitting(pos.func.dfg.value_type(block_param)) {
continue;
}
split_ebb_param(pos, ebb, num, ebb_param, Opcode::Iconcat, &mut repairs);
split_block_param(pos, block, num, block_param, Opcode::Iconcat, &mut repairs);
}
perform_repairs(pos, cfg, repairs);
}
fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Vec<Repair>) {
// We have split the value requested, and now we may need to fix some EBB predecessors.
// We have split the value requested, and now we may need to fix some block predecessors.
while let Some(repair) = repairs.pop() {
for BasicBlock { inst, .. } in cfg.pred_iter(repair.ebb) {
for BlockPredecessor { inst, .. } in cfg.pred_iter(repair.block) {
let branch_opc = pos.func.dfg[inst].opcode();
debug_assert!(
branch_opc.is_branch(),
@@ -176,7 +176,7 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve
.take_value_list()
.expect("Branches must have value lists.");
let num_args = args.len(&pos.func.dfg.value_lists);
// Get the old value passed to the EBB argument we're repairing.
// Get the old value passed to the block argument we're repairing.
let old_arg = args
.get(num_fixed_args + repair.num, &pos.func.dfg.value_lists)
.expect("Too few branch arguments");
@@ -190,13 +190,13 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve
// Split the old argument, possibly causing more repairs to be scheduled.
pos.goto_inst(inst);
let inst_ebb = pos.func.layout.inst_ebb(inst).expect("inst in ebb");
let inst_block = pos.func.layout.inst_block(inst).expect("inst in block");
// Insert split values prior to the terminal branch group.
let canonical = pos
.func
.layout
.canonical_branch_inst(&pos.func.dfg, inst_ebb);
.canonical_branch_inst(&pos.func.dfg, inst_block);
if let Some(first_branch) = canonical {
pos.goto_inst(first_branch);
}
@@ -209,7 +209,7 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve
.unwrap() = lo;
// The `hi` part goes at the end. Since multiple repairs may have been scheduled to the
// same EBB, there could be multiple arguments missing.
// same block, there could be multiple arguments missing.
if num_args > num_fixed_args + repair.hi_num {
*args
.get_mut(
@@ -259,11 +259,11 @@ fn split_value(
}
}
}
ValueDef::Param(ebb, num) => {
// This is an EBB parameter.
ValueDef::Param(block, num) => {
// This is a block parameter.
// We can split the parameter value unless this is the entry block.
if pos.func.layout.entry_block() != Some(ebb) {
reuse = Some(split_ebb_param(pos, ebb, num, value, concat, repairs));
if pos.func.layout.entry_block() != Some(block) {
reuse = Some(split_block_param(pos, block, num, value, concat, repairs));
}
}
}
@@ -273,7 +273,7 @@ fn split_value(
pair
} else {
// No, we'll just have to insert the requested split instruction at `pos`. Note that `pos`
// has not been moved by the EBB argument code above when `reuse` is `None`.
// has not been moved by the block argument code above when `reuse` is `None`.
match concat {
Opcode::Iconcat => pos.ins().isplit(value),
Opcode::Vconcat => pos.ins().vsplit(value),
@@ -282,9 +282,9 @@ fn split_value(
}
}
fn split_ebb_param(
fn split_block_param(
pos: &mut FuncCursor,
ebb: Ebb,
block: Block,
param_num: usize,
value: Value,
concat: Opcode,
@@ -300,14 +300,14 @@ fn split_ebb_param(
};
// Since the `repairs` stack potentially contains other parameter numbers for
// `ebb`, avoid shifting and renumbering EBB parameters. It could invalidate other
// `block`, avoid shifting and renumbering block parameters. It could invalidate other
// `repairs` entries.
//
// Replace the original `value` with the low part, and append the high part at the
// end of the argument list.
let lo = pos.func.dfg.replace_ebb_param(value, split_type);
let hi_num = pos.func.dfg.num_ebb_params(ebb);
let hi = pos.func.dfg.append_ebb_param(ebb, split_type);
let lo = pos.func.dfg.replace_block_param(value, split_type);
let hi_num = pos.func.dfg.num_block_params(block);
let hi = pos.func.dfg.append_block_param(block, split_type);
// Now the original value is dangling. Insert a concatenation instruction that can
// compute it from the two new parameters. This also serves as a record of what we
@@ -315,14 +315,14 @@ fn split_ebb_param(
//
// Note that it is safe to move `pos` here since `reuse` was set above, so we don't
// need to insert a split instruction before returning.
pos.goto_first_inst(ebb);
pos.goto_first_inst(block);
pos.ins()
.with_result(value)
.Binary(concat, split_type, lo, hi);
// Finally, splitting the EBB parameter is not enough. We also have to repair all
// Finally, splitting the block parameter is not enough. We also have to repair all
// of the predecessor instructions that branch here.
add_repair(concat, split_type, ebb, param_num, hi_num, repairs);
add_repair(concat, split_type, block, param_num, hi_num, repairs);
(lo, hi)
}
@@ -331,7 +331,7 @@ fn split_ebb_param(
fn add_repair(
concat: Opcode,
split_type: Type,
ebb: Ebb,
block: Block,
num: usize,
hi_num: usize,
repairs: &mut Vec<Repair>,
@@ -339,7 +339,7 @@ fn add_repair(
repairs.push(Repair {
concat,
split_type,
ebb,
block,
num,
hi_num,
});

View File

@@ -3,10 +3,10 @@
use crate::cursor::{Cursor, EncCursor, FuncCursor};
use crate::dominator_tree::DominatorTree;
use crate::entity::{EntityList, ListPool};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::fx::FxHashSet;
use crate::ir::{
DataFlowGraph, Ebb, Function, Inst, InstBuilder, InstructionData, Layout, Opcode, Type, Value,
Block, DataFlowGraph, Function, Inst, InstBuilder, InstructionData, Layout, Opcode, Type, Value,
};
use crate::isa::TargetIsa;
use crate::loop_analysis::{Loop, LoopAnalysis};
@@ -65,23 +65,23 @@ pub fn do_licm(
// A jump instruction to the header is placed at the end of the pre-header.
fn create_pre_header(
isa: &dyn TargetIsa,
header: Ebb,
header: Block,
func: &mut Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree,
) -> Ebb {
) -> Block {
let pool = &mut ListPool::<Value>::new();
let header_args_values = func.dfg.ebb_params(header).to_vec();
let header_args_values = func.dfg.block_params(header).to_vec();
let header_args_types: Vec<Type> = header_args_values
.into_iter()
.map(|val| func.dfg.value_type(val))
.collect();
let pre_header = func.dfg.make_ebb();
let pre_header = func.dfg.make_block();
let mut pre_header_args_value: EntityList<Value> = EntityList::new();
for typ in header_args_types {
pre_header_args_value.push(func.dfg.append_ebb_param(pre_header, typ), pool);
pre_header_args_value.push(func.dfg.append_block_param(pre_header, typ), pool);
}
for BasicBlock {
for BlockPredecessor {
inst: last_inst, ..
} in cfg.pred_iter(header)
{
@@ -93,7 +93,7 @@ fn create_pre_header(
{
let mut pos = EncCursor::new(func, isa).at_top(header);
// Inserts the pre-header at the right place in the layout.
pos.insert_ebb(pre_header);
pos.insert_block(pre_header);
pos.next_inst();
pos.ins().jump(header, pre_header_args_value.as_slice(pool));
}
@@ -104,16 +104,16 @@ fn create_pre_header(
//
// A loop header has a pre-header if there is only one predecessor that the header doesn't
// dominate.
// Returns the pre-header Ebb and the instruction jumping to the header.
// Returns the pre-header Block and the instruction jumping to the header.
fn has_pre_header(
layout: &Layout,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
header: Ebb,
) -> Option<(Ebb, Inst)> {
header: Block,
) -> Option<(Block, Inst)> {
let mut result = None;
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: branch_inst,
} in cfg.pred_iter(header)
{
@@ -123,13 +123,13 @@ fn has_pre_header(
// We have already found one, there are more than one
return None;
}
if branch_inst != layout.last_inst(pred_ebb).unwrap()
|| cfg.succ_iter(pred_ebb).nth(1).is_some()
if branch_inst != layout.last_inst(pred_block).unwrap()
|| cfg.succ_iter(pred_block).nth(1).is_some()
{
// It's along a critical edge, so don't use it.
return None;
}
result = Some((pred_ebb, branch_inst));
result = Some((pred_block, branch_inst));
}
}
result
@@ -176,7 +176,7 @@ fn is_loop_invariant(inst: Inst, dfg: &DataFlowGraph, loop_values: &FxHashSet<Va
true
}
// Traverses a loop in reverse post-order from a header EBB and identify loop-invariant
// Traverses a loop in reverse post-order from a header block and identify loop-invariant
// instructions. These loop-invariant instructions are then removed from the code and returned
// (in reverse post-order) for later use.
fn remove_loop_invariant_instructions(
@@ -188,13 +188,13 @@ fn remove_loop_invariant_instructions(
let mut loop_values: FxHashSet<Value> = FxHashSet();
let mut invariant_insts: Vec<Inst> = Vec::new();
let mut pos = FuncCursor::new(func);
// We traverse the loop EBB in reverse post-order.
for ebb in postorder_ebbs_loop(loop_analysis, cfg, lp).iter().rev() {
// Arguments of the EBB are loop values
for val in pos.func.dfg.ebb_params(*ebb) {
// We traverse the loop blocks in reverse post-order.
for block in postorder_blocks_loop(loop_analysis, cfg, lp).iter().rev() {
// Arguments of the block are loop values
for val in pos.func.dfg.block_params(*block) {
loop_values.insert(*val);
}
pos.goto_top(*ebb);
pos.goto_top(*block);
#[cfg_attr(feature = "cargo-clippy", allow(clippy::block_in_if_condition_stmt))]
while let Some(inst) = pos.next_inst() {
if is_loop_invariant(inst, &pos.func.dfg, &loop_values) {
@@ -215,8 +215,12 @@ fn remove_loop_invariant_instructions(
invariant_insts
}
/// Return ebbs from a loop in post-order, starting from an entry point in the block.
fn postorder_ebbs_loop(loop_analysis: &LoopAnalysis, cfg: &ControlFlowGraph, lp: Loop) -> Vec<Ebb> {
/// Return blocks from a loop in post-order, starting from an entry point in the loop.
fn postorder_blocks_loop(
loop_analysis: &LoopAnalysis,
cfg: &ControlFlowGraph,
lp: Loop,
) -> Vec<Block> {
let mut grey = FxHashSet();
let mut black = FxHashSet();
let mut stack = vec![loop_analysis.loop_header(lp)];
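The rest of the body is elided in this hunk; a plausible completion of the grey/black iterative post-order it sets up is sketched below (not the repository's exact code):
let mut postorder = Vec::new();
while let Some(node) = stack.pop() {
    if !grey.contains(&node) {
        // First visit: mark grey, keep it on the stack, and expand
        // successors that stay inside the loop.
        grey.insert(node);
        stack.push(node);
        for succ in cfg.succ_iter(node) {
            if loop_analysis.is_in_loop(succ, lp) && !grey.contains(&succ) {
                stack.push(succ);
            }
        }
    } else if !black.contains(&node) {
        // Second visit: all successors are finished, so emit in post-order.
        postorder.push(node);
        black.insert(node);
    }
}
postorder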

View File

@@ -1,12 +1,12 @@
//! A loop analysis represented as mappings of loops to their header Ebb
//! A loop analysis represented as mappings of loops to their header Block
//! and parent in the loop tree.
use crate::dominator_tree::DominatorTree;
use crate::entity::entity_impl;
use crate::entity::SecondaryMap;
use crate::entity::{Keys, PrimaryMap};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::ir::{Ebb, Function, Layout};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::{Block, Function, Layout};
use crate::packed_option::PackedOption;
use crate::timing;
use alloc::vec::Vec;
@@ -18,22 +18,22 @@ entity_impl!(Loop, "loop");
/// Loop tree information for a single function.
///
/// Loops are referenced by the Loop object, and for each loop you can access its header EBB,
/// its eventual parent in the loop tree and all the EBB belonging to the loop.
/// Loops are referenced by the Loop object, and for each loop you can access its header block,
/// its eventual parent in the loop tree and all the blocks belonging to the loop.
pub struct LoopAnalysis {
loops: PrimaryMap<Loop, LoopData>,
ebb_loop_map: SecondaryMap<Ebb, PackedOption<Loop>>,
block_loop_map: SecondaryMap<Block, PackedOption<Loop>>,
valid: bool,
}
struct LoopData {
header: Ebb,
header: Block,
parent: PackedOption<Loop>,
}
impl LoopData {
/// Creates a `LoopData` object with the loop header and its eventual parent in the loop tree.
pub fn new(header: Ebb, parent: Option<Loop>) -> Self {
pub fn new(header: Block, parent: Option<Loop>) -> Self {
Self {
header,
parent: parent.into(),
@@ -49,7 +49,7 @@ impl LoopAnalysis {
Self {
valid: false,
loops: PrimaryMap::new(),
ebb_loop_map: SecondaryMap::new(),
block_loop_map: SecondaryMap::new(),
}
}
@@ -58,11 +58,11 @@ impl LoopAnalysis {
self.loops.keys()
}
/// Returns the header EBB of a particular loop.
/// Returns the header block of a particular loop.
///
/// The characteristic property of a loop header block is that it dominates some of its
/// predecessors.
pub fn loop_header(&self, lp: Loop) -> Ebb {
pub fn loop_header(&self, lp: Loop) -> Block {
self.loops[lp].header
}
@@ -71,14 +71,14 @@ impl LoopAnalysis {
self.loops[lp].parent.expand()
}
/// Determine if an Ebb belongs to a loop by running a finger along the loop tree.
/// Determine if a Block belongs to a loop by running a finger along the loop tree.
///
/// Returns `true` if `ebb` is in loop `lp`.
pub fn is_in_loop(&self, ebb: Ebb, lp: Loop) -> bool {
let ebb_loop = self.ebb_loop_map[ebb];
match ebb_loop.expand() {
/// Returns `true` if `block` is in loop `lp`.
pub fn is_in_loop(&self, block: Block, lp: Loop) -> bool {
let block_loop = self.block_loop_map[block];
match block_loop.expand() {
None => false,
Some(ebb_loop) => self.is_child_loop(ebb_loop, lp),
Some(block_loop) => self.is_child_loop(block_loop, lp),
}
}
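A minimal usage sketch of this API, mirroring the tests below (assumes `func`, `cfg`, and `domtree` are already computed):
let mut loop_analysis = LoopAnalysis::new();
loop_analysis.compute(&func, &cfg, &domtree);
for lp in loop_analysis.loops() {
    let header = loop_analysis.loop_header(lp);
    debug_assert!(loop_analysis.is_in_loop(header, lp)); // a header is in its own loop
}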
@@ -103,8 +103,8 @@ impl LoopAnalysis {
pub fn compute(&mut self, func: &Function, cfg: &ControlFlowGraph, domtree: &DominatorTree) {
let _tt = timing::loop_analysis();
self.loops.clear();
self.ebb_loop_map.clear();
self.ebb_loop_map.resize(func.dfg.num_ebbs());
self.block_loop_map.clear();
self.block_loop_map.resize(func.dfg.num_blocks());
self.find_loop_headers(cfg, domtree, &func.layout);
self.discover_loop_blocks(cfg, domtree, &func.layout);
self.valid = true;
@@ -124,11 +124,11 @@ impl LoopAnalysis {
/// memory be retained.
pub fn clear(&mut self) {
self.loops.clear();
self.ebb_loop_map.clear();
self.block_loop_map.clear();
self.valid = false;
}
// Traverses the CFG in reverse postorder and create a loop object for every EBB having a
// Traverses the CFG in reverse postorder and creates a loop object for every block having a
// back edge.
fn find_loop_headers(
&mut self,
@@ -137,16 +137,16 @@ impl LoopAnalysis {
layout: &Layout,
) {
// We traverse the CFG in reverse postorder
for &ebb in domtree.cfg_postorder().iter().rev() {
for BasicBlock {
for &block in domtree.cfg_postorder().iter().rev() {
for BlockPredecessor {
inst: pred_inst, ..
} in cfg.pred_iter(ebb)
} in cfg.pred_iter(block)
{
// If the ebb dominates one of its predecessors it is a back edge
if domtree.dominates(ebb, pred_inst, layout) {
// This ebb is a loop header, so we create its associated loop
let lp = self.loops.push(LoopData::new(ebb, None));
self.ebb_loop_map[ebb] = lp.into();
// If the block dominates one of its predecessors, it is a back edge
if domtree.dominates(block, pred_inst, layout) {
// This block is a loop header, so we create its associated loop
let lp = self.loops.push(LoopData::new(block, None));
self.block_loop_map[block] = lp.into();
break;
// We break because we only need one back edge to identify a loop header.
}
@@ -155,7 +155,7 @@ impl LoopAnalysis {
}
// Intended to be called after `find_loop_headers`. For each detected loop header,
// discovers all the ebb belonging to the loop and its inner loops. After a call to this
// discovers all the blocks belonging to the loop and its inner loops. After a call to this
// function, the loop tree is fully constructed.
fn discover_loop_blocks(
&mut self,
@@ -163,12 +163,12 @@ impl LoopAnalysis {
domtree: &DominatorTree,
layout: &Layout,
) {
let mut stack: Vec<Ebb> = Vec::new();
let mut stack: Vec<Block> = Vec::new();
// We handle each loop header in reverse order, corresponding to a pseudo postorder
// traversal of the graph.
for lp in self.loops().rev() {
for BasicBlock {
ebb: pred,
for BlockPredecessor {
block: pred,
inst: pred_inst,
} in cfg.pred_iter(self.loops[lp].header)
{
@@ -178,11 +178,11 @@ impl LoopAnalysis {
}
}
while let Some(node) = stack.pop() {
let continue_dfs: Option<Ebb>;
match self.ebb_loop_map[node].expand() {
let continue_dfs: Option<Block>;
match self.block_loop_map[node].expand() {
None => {
// The node hasn't been visited yet, we tag it as part of the loop
self.ebb_loop_map[node] = PackedOption::from(lp);
self.block_loop_map[node] = PackedOption::from(lp);
continue_dfs = Some(node);
}
Some(node_loop) => {
@@ -221,7 +221,7 @@ impl LoopAnalysis {
// Now we have handled the popped node and need to continue the DFS by adding the
// predecessors of that node
if let Some(continue_dfs) = continue_dfs {
for BasicBlock { ebb: pred, .. } in cfg.pred_iter(continue_dfs) {
for BlockPredecessor { block: pred, .. } in cfg.pred_iter(continue_dfs) {
stack.push(pred)
}
}
@@ -242,27 +242,27 @@ mod tests {
#[test]
fn nested_loops_detection() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let ebb3 = func.dfg.make_ebb();
let cond = func.dfg.append_ebb_param(ebb0, types::I32);
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let block3 = func.dfg.make_block();
let cond = func.dfg.append_block_param(block0, types::I32);
{
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
cur.ins().jump(ebb1, &[]);
cur.insert_block(block0);
cur.ins().jump(block1, &[]);
cur.insert_ebb(ebb1);
cur.ins().jump(ebb2, &[]);
cur.insert_block(block1);
cur.ins().jump(block2, &[]);
cur.insert_ebb(ebb2);
cur.ins().brnz(cond, ebb1, &[]);
cur.ins().jump(ebb3, &[]);
cur.insert_block(block2);
cur.ins().brnz(cond, block1, &[]);
cur.ins().jump(block3, &[]);
cur.insert_ebb(ebb3);
cur.ins().brnz(cond, ebb0, &[]);
cur.insert_block(block3);
cur.ins().brnz(cond, block0, &[]);
}
let mut loop_analysis = LoopAnalysis::new();
@@ -274,54 +274,54 @@ mod tests {
let loops = loop_analysis.loops().collect::<Vec<Loop>>();
assert_eq!(loops.len(), 2);
assert_eq!(loop_analysis.loop_header(loops[0]), ebb0);
assert_eq!(loop_analysis.loop_header(loops[1]), ebb1);
assert_eq!(loop_analysis.loop_header(loops[0]), block0);
assert_eq!(loop_analysis.loop_header(loops[1]), block1);
assert_eq!(loop_analysis.loop_parent(loops[1]), Some(loops[0]));
assert_eq!(loop_analysis.loop_parent(loops[0]), None);
assert_eq!(loop_analysis.is_in_loop(ebb0, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(ebb0, loops[1]), false);
assert_eq!(loop_analysis.is_in_loop(ebb1, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(ebb1, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(ebb2, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(ebb2, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(ebb3, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(ebb0, loops[1]), false);
assert_eq!(loop_analysis.is_in_loop(block0, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block0, loops[1]), false);
assert_eq!(loop_analysis.is_in_loop(block1, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(block1, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block2, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(block2, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block3, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block0, loops[1]), false);
}
#[test]
fn complex_loop_detection() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let ebb2 = func.dfg.make_ebb();
let ebb3 = func.dfg.make_ebb();
let ebb4 = func.dfg.make_ebb();
let ebb5 = func.dfg.make_ebb();
let cond = func.dfg.append_ebb_param(ebb0, types::I32);
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let block3 = func.dfg.make_block();
let block4 = func.dfg.make_block();
let block5 = func.dfg.make_block();
let cond = func.dfg.append_block_param(block0, types::I32);
{
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
cur.ins().brnz(cond, ebb1, &[]);
cur.ins().jump(ebb3, &[]);
cur.insert_block(block0);
cur.ins().brnz(cond, block1, &[]);
cur.ins().jump(block3, &[]);
cur.insert_ebb(ebb1);
cur.ins().jump(ebb2, &[]);
cur.insert_block(block1);
cur.ins().jump(block2, &[]);
cur.insert_ebb(ebb2);
cur.ins().brnz(cond, ebb1, &[]);
cur.ins().jump(ebb5, &[]);
cur.insert_block(block2);
cur.ins().brnz(cond, block1, &[]);
cur.ins().jump(block5, &[]);
cur.insert_ebb(ebb3);
cur.ins().jump(ebb4, &[]);
cur.insert_block(block3);
cur.ins().jump(block4, &[]);
cur.insert_ebb(ebb4);
cur.ins().brnz(cond, ebb3, &[]);
cur.ins().jump(ebb5, &[]);
cur.insert_block(block4);
cur.ins().brnz(cond, block3, &[]);
cur.ins().jump(block5, &[]);
cur.insert_ebb(ebb5);
cur.ins().brnz(cond, ebb0, &[]);
cur.insert_block(block5);
cur.ins().brnz(cond, block0, &[]);
}
let mut loop_analysis = LoopAnalysis::new();
@@ -333,17 +333,17 @@ mod tests {
let loops = loop_analysis.loops().collect::<Vec<Loop>>();
assert_eq!(loops.len(), 3);
assert_eq!(loop_analysis.loop_header(loops[0]), ebb0);
assert_eq!(loop_analysis.loop_header(loops[1]), ebb1);
assert_eq!(loop_analysis.loop_header(loops[2]), ebb3);
assert_eq!(loop_analysis.loop_header(loops[0]), block0);
assert_eq!(loop_analysis.loop_header(loops[1]), block1);
assert_eq!(loop_analysis.loop_header(loops[2]), block3);
assert_eq!(loop_analysis.loop_parent(loops[1]), Some(loops[0]));
assert_eq!(loop_analysis.loop_parent(loops[2]), Some(loops[0]));
assert_eq!(loop_analysis.loop_parent(loops[0]), None);
assert_eq!(loop_analysis.is_in_loop(ebb0, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(ebb1, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(ebb2, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(ebb3, loops[2]), true);
assert_eq!(loop_analysis.is_in_loop(ebb4, loops[2]), true);
assert_eq!(loop_analysis.is_in_loop(ebb5, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block0, loops[0]), true);
assert_eq!(loop_analysis.is_in_loop(block1, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(block2, loops[1]), true);
assert_eq!(loop_analysis.is_in_loop(block3, loops[2]), true);
assert_eq!(loop_analysis.is_in_loop(block4, loops[2]), true);
assert_eq!(loop_analysis.is_in_loop(block5, loops[0]), true);
}
}

View File

@@ -18,7 +18,7 @@ static CANON_64BIT_NAN: u64 = 0b011111111111100000000000000000000000000000000000
pub fn do_nan_canonicalization(func: &mut Function) {
let _tt = timing::canonicalize_nans();
let mut pos = FuncCursor::new(func);
while let Some(_ebb) = pos.next_ebb() {
while let Some(_block) = pos.next_block() {
while let Some(inst) = pos.next_inst() {
if is_fp_arith(&mut pos, inst) {
add_nan_canon_seq(&mut pos, inst);
@@ -59,7 +59,7 @@ fn add_nan_canon_seq(pos: &mut FuncCursor, inst: Inst) {
let val = pos.func.dfg.first_result(inst);
let val_type = pos.func.dfg.value_type(val);
let new_res = pos.func.dfg.replace_result(val, val_type);
let _next_inst = pos.next_inst().expect("EBB missing terminator!");
let _next_inst = pos.next_inst().expect("block missing terminator!");
// Insert a comparison instruction, to check if `inst_res` is NaN. Select
// the canonical NaN value if `val` is NaN, assign the result to `inst`.
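A hedged sketch of that sequence for an f64 value follows; the exact instructions are elided above, and `Ieee64::with_bits` is an assumption about how the constant is materialized.
let is_nan = pos.ins().fcmp(FloatCC::Unordered, new_res, new_res); // NaN != itself
let canon = pos.ins().f64const(Ieee64::with_bits(CANON_64BIT_NAN)); // assumed constructor
pos.ins().with_result(val).select(is_nan, canon, new_res);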

View File

@@ -7,7 +7,7 @@ use crate::ir::condcodes::{CondCode, FloatCC, IntCC};
use crate::ir::dfg::ValueDef;
use crate::ir::immediates::{Imm64, Offset32};
use crate::ir::instructions::{Opcode, ValueList};
use crate::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, MemFlags, Type, Value};
use crate::ir::{Block, Function, Inst, InstBuilder, InstructionData, MemFlags, Type, Value};
use crate::isa::TargetIsa;
use crate::timing;
@@ -18,7 +18,7 @@ struct CmpBrInfo {
/// The icmp, icmp_imm, or fcmp instruction.
cmp_inst: Inst,
/// The destination of the branch.
destination: Ebb,
destination: Block,
/// The arguments of the branch.
args: ValueList,
/// The first argument to the comparison. The second is in the `kind` field.
@@ -360,7 +360,7 @@ fn optimize_complex_addresses(pos: &mut EncCursor, inst: Inst, isa: &dyn TargetI
pub fn do_postopt(func: &mut Function, isa: &dyn TargetIsa) {
let _tt = timing::postopt();
let mut pos = EncCursor::new(func, isa);
while let Some(_ebb) = pos.next_ebb() {
while let Some(_block) = pos.next_block() {
let mut last_flags_clobber = None;
while let Some(inst) = pos.next_inst() {
if isa.uses_cpu_flags() {

View File

@@ -2,7 +2,7 @@
use crate::entity::SecondaryMap;
use crate::ir;
use crate::ir::entities::{AnyEntity, Ebb, Inst, Value};
use crate::ir::entities::{AnyEntity, Block, Inst, Value};
use crate::ir::function::Function;
use crate::isa::TargetIsa;
use crate::result::CodegenError;
@@ -47,15 +47,15 @@ pub fn pretty_verifier_error<'a>(
struct PrettyVerifierError<'a>(Box<dyn FuncWriter + 'a>, &'a mut Vec<VerifierError>);
impl<'a> FuncWriter for PrettyVerifierError<'a> {
fn write_ebb_header(
fn write_block_header(
&mut self,
w: &mut dyn Write,
func: &Function,
isa: Option<&dyn TargetIsa>,
ebb: Ebb,
block: Block,
indent: usize,
) -> fmt::Result {
pretty_ebb_header_error(w, func, isa, ebb, indent, &mut *self.0, self.1)
pretty_block_header_error(w, func, isa, block, indent, &mut *self.0, self.1)
}
fn write_instruction(
@@ -81,18 +81,18 @@ impl<'a> FuncWriter for PrettyVerifierError<'a> {
}
}
/// Pretty-print a function verifier error for a given EBB.
fn pretty_ebb_header_error(
/// Pretty-print a function verifier error for a given block.
fn pretty_block_header_error(
w: &mut dyn Write,
func: &Function,
isa: Option<&dyn TargetIsa>,
cur_ebb: Ebb,
cur_block: Block,
indent: usize,
func_w: &mut dyn FuncWriter,
errors: &mut Vec<VerifierError>,
) -> fmt::Result {
let mut s = String::new();
func_w.write_ebb_header(&mut s, func, isa, cur_ebb, indent)?;
func_w.write_block_header(&mut s, func, isa, cur_block, indent)?;
write!(w, "{}", s)?;
// TODO: Use drain_filter here when it gets stabilized
@@ -100,7 +100,7 @@ fn pretty_ebb_header_error(
let mut printed_error = false;
while i != errors.len() {
match errors[i].location {
ir::entities::AnyEntity::Ebb(ebb) if ebb == cur_ebb => {
ir::entities::AnyEntity::Block(block) if block == cur_block => {
if !printed_error {
print_arrow(w, &s)?;
printed_error = true;

View File

@@ -8,7 +8,8 @@ use crate::ir::dfg::DataFlowGraph;
use crate::ir::instructions::BranchInfo;
use crate::ir::stackslot::{StackSlotKind, StackSlots};
use crate::ir::{
Ebb, Function, Inst, InstBuilder, InstructionData, Opcode, StackSlotData, Type, Value, ValueLoc,
Block, Function, Inst, InstBuilder, InstructionData, Opcode, StackSlotData, Type, Value,
ValueLoc,
};
use crate::isa::{RegInfo, RegUnit, TargetIsa};
use crate::regalloc::RegDiversions;
@@ -20,7 +21,7 @@ use cranelift_entity::{PrimaryMap, SecondaryMap};
// A description of the redundant-fill-removal algorithm
//
//
// The algorithm works forwards through each Ebb. It carries along and updates a table,
// The algorithm works forwards through each Block. It carries along and updates a table,
// AvailEnv, with which it tracks registers that are known to have the same value as some stack
// slot. The actions on encountering an instruction depend on the instruction, as follows:
//
@@ -68,19 +69,19 @@ use cranelift_entity::{PrimaryMap, SecondaryMap};
//
// The overall algorithm, for a function, starts like this:
//
// * (once per function): finds Ebbs that have two or more predecessors, since they will be the
// roots of Ebb trees. Also, the entry node for the function is considered to be a root.
// * (once per function): finds Blocks that have two or more predecessors, since they will be the
// roots of Block trees. Also, the entry node for the function is considered to be a root.
//
// It then continues with a loop that first finds a tree of Ebbs ("discovery") and then removes
// It then continues with a loop that first finds a tree of Blocks ("discovery") and then removes
// redundant fills as described above ("processing"):
//
// * (discovery; once per tree): for each root, performs a depth first search to find all the Ebbs
// * (discovery; once per tree): for each root, performs a depth first search to find all the Blocks
// in the tree, guided by RedundantReloadRemover::discovery_stack.
//
// * (processing; once per tree): the just-discovered tree is then processed as described above,
// guided by RedundantReloadRemover::processing_stack.
//
// In this way, all Ebbs reachable from the function's entry point are eventually processed. Note
// In this way, all Blocks reachable from the function's entry point are eventually processed. Note
// that each tree is processed as soon as it has been discovered, so the algorithm never creates a
// list of trees for the function.
//
@@ -88,7 +89,7 @@ use cranelift_entity::{PrimaryMap, SecondaryMap};
// reused for multiple functions so as to minimise heap turnover. The fields are, roughly:
//
// num_regunits -- constant for the whole function; used by the tree processing phase
// num_preds_per_ebb -- constant for the whole function; used by the tree discovery process
// num_preds_per_block -- constant for the whole function; used by the tree discovery process
//
// discovery_stack -- used to guide the tree discovery process
// nodes_in_tree -- the discovered nodes are recorded here
@@ -121,8 +122,8 @@ use cranelift_entity::{PrimaryMap, SecondaryMap};
// =============================================================================================
// Data structures used for discovery of trees
// `ZeroOneOrMany` is used to record the number of predecessors an Ebb block has. The `Zero` case
// is included so as to cleanly handle the case where the incoming graph has unreachable Ebbs.
// `ZeroOneOrMany` is used to record the number of predecessors a Block has. The `Zero` case
// is included so as to cleanly handle the case where the incoming graph has unreachable Blocks.
#[derive(Clone, PartialEq)]
enum ZeroOneOrMany {
@@ -183,23 +184,23 @@ struct AvailEnv {
}
// `ProcessingStackElem` combines AvailEnv with contextual information needed to "navigate" within
// an Ebb.
// a Block.
//
// A ProcessingStackElem conceptually has the lifetime of exactly one Ebb: once the current Ebb is
// A ProcessingStackElem conceptually has the lifetime of exactly one Block: once the current Block is
// completed, the ProcessingStackElem will be abandoned. In practice the top level state,
// RedundantReloadRemover, caches them, so as to avoid heap turnover.
//
// Note that ProcessingStackElem must contain a CursorPosition. The CursorPosition, which
// indicates where we are in the current Ebb, cannot be implicitly maintained by looping over all
// the instructions in an Ebb in turn, because we may choose to suspend processing the current Ebb
// indicates where we are in the current Block, cannot be implicitly maintained by looping over all
// the instructions in a Block in turn, because we may choose to suspend processing the current Block
// at a side exit, continue by processing the subtree reached via the side exit, and only later
// resume the current Ebb.
// resume the current Block.
struct ProcessingStackElem {
/// Indicates the AvailEnv at the current point in the Ebb.
/// Indicates the AvailEnv at the current point in the Block.
avail_env: AvailEnv,
/// Shows where we currently are inside the Ebb.
/// Shows where we currently are inside the Block.
cursor: CursorPosition,
/// Indicates the currently active register diversions at the current point.
@@ -212,7 +213,7 @@ struct ProcessingStackElem {
// `RedundantReloadRemover` contains data structures for the two passes: discovery of tree shaped
// regions, and processing of them. These are allocated once and stay alive for the entire
// function, even though they are cleared out for each new tree shaped region. It also caches
// `num_regunits` and `num_preds_per_ebb`, which are computed at the start of each function and
// `num_regunits` and `num_preds_per_block`, which are computed at the start of each function and
// then remain constant.
/// The redundant reload remover's state.
@@ -222,22 +223,22 @@ pub struct RedundantReloadRemover {
/// function.
num_regunits: Option<u16>,
/// This stores, for each Ebb, a characterisation of the number of predecessors it has.
num_preds_per_ebb: PrimaryMap<Ebb, ZeroOneOrMany>,
/// This stores, for each Block, a characterisation of the number of predecessors it has.
num_preds_per_block: PrimaryMap<Block, ZeroOneOrMany>,
/// The stack used for the first phase (discovery). There is one element on the discovery
/// stack for each currently unexplored Ebb in the tree being searched.
discovery_stack: Vec<Ebb>,
/// stack for each currently unexplored Block in the tree being searched.
discovery_stack: Vec<Block>,
/// The nodes in the discovered tree are inserted here.
nodes_in_tree: EntitySet<Ebb>,
nodes_in_tree: EntitySet<Block>,
/// The stack used during the second phase (transformation). There is one element on the
/// processing stack for each currently-open node in the tree being transformed.
processing_stack: Vec<ProcessingStackElem>,
/// Used in the second phase to avoid visiting nodes more than once.
nodes_already_visited: EntitySet<Ebb>,
nodes_already_visited: EntitySet<Block>,
}
// =============================================================================================
@@ -301,17 +302,17 @@ fn slot_of_value<'s>(
impl RedundantReloadRemover {
// A helper for `add_nodes_to_tree` below.
fn discovery_stack_push_successors_of(&mut self, cfg: &ControlFlowGraph, node: Ebb) {
fn discovery_stack_push_successors_of(&mut self, cfg: &ControlFlowGraph, node: Block) {
for successor in cfg.succ_iter(node) {
self.discovery_stack.push(successor);
}
}
// Visit the tree of Ebbs rooted at `starting_point` and add them to `self.nodes_in_tree`.
// `self.num_preds_per_ebb` guides the process, ensuring we don't leave the tree-ish region
// Visit the tree of Blocks rooted at `starting_point` and add them to `self.nodes_in_tree`.
// `self.num_preds_per_block` guides the process, ensuring we don't leave the tree-ish region
// and indirectly ensuring that the process will terminate in the presence of cycles in the
// graph. `self.discovery_stack` holds the search state in this function.
fn add_nodes_to_tree(&mut self, cfg: &ControlFlowGraph, starting_point: Ebb) {
fn add_nodes_to_tree(&mut self, cfg: &ControlFlowGraph, starting_point: Block) {
// One might well ask why this doesn't loop forever when it encounters cycles in the
// control flow graph. The reason is that any cycle in the graph that is reachable from
// anywhere outside the cycle -- in particular, that is reachable from the function's
@@ -325,7 +326,7 @@ impl RedundantReloadRemover {
self.discovery_stack_push_successors_of(cfg, starting_point);
while let Some(node) = self.discovery_stack.pop() {
match self.num_preds_per_ebb[node] {
match self.num_preds_per_block[node] {
// We arrived at a node with multiple predecessors, so it's a new root. Ignore it.
ZeroOneOrMany::Many => {}
// This node has just one predecessor, so we should incorporate it in the tree and
@@ -652,8 +653,8 @@ impl RedundantReloadRemover {
impl RedundantReloadRemover {
// Push a clone of the top-of-stack ProcessingStackElem. This will be used to process exactly
// one Ebb. The diversions are created new, rather than cloned, to reflect the fact
// that diversions are local to each Ebb.
// one Block. The diversions are created new, rather than cloned, to reflect the fact
// that diversions are local to each Block.
fn processing_stack_push(&mut self, cursor: CursorPosition) {
let avail_env = if let Some(stack_top) = self.processing_stack.last() {
stack_top.avail_env.clone()
@@ -674,7 +675,7 @@ impl RedundantReloadRemover {
// This pushes the node `dst` onto the processing stack, and sets up the new
// ProcessingStackElem accordingly. But it does all that only if `dst` is part of the current
// tree *and* we haven't yet visited it.
fn processing_stack_maybe_push(&mut self, dst: Ebb) {
fn processing_stack_maybe_push(&mut self, dst: Block) {
if self.nodes_in_tree.contains(dst) && !self.nodes_already_visited.contains(dst) {
if !self.processing_stack.is_empty() {
// If this isn't the outermost node in the tree (that is, the root), then it must
@@ -682,7 +683,7 @@ impl RedundantReloadRemover {
// incorporated in any tree. Nodes with two or more predecessors are the root of
// some other tree, and visiting them as if they were part of the current tree
// would be a serious error.
debug_assert!(self.num_preds_per_ebb[dst] == ZeroOneOrMany::One);
debug_assert!(self.num_preds_per_block[dst] == ZeroOneOrMany::One);
}
self.processing_stack_push(CursorPosition::Before(dst));
self.nodes_already_visited.insert(dst);
@@ -697,7 +698,7 @@ impl RedundantReloadRemover {
func: &mut Function,
reginfo: &RegInfo,
isa: &dyn TargetIsa,
root: Ebb,
root: Block,
) {
debug_assert!(self.nodes_in_tree.contains(root));
debug_assert!(self.processing_stack.is_empty());
@@ -728,10 +729,10 @@ impl RedundantReloadRemover {
// Update diversions after the insn.
self.processing_stack[tos].diversions.apply(&func.dfg[inst]);
// If the insn can branch outside this Ebb, push work items on the stack for all
// target Ebbs that are part of the same tree and that we haven't yet visited.
// If the insn can branch outside this Block, push work items on the stack for all
// target Blocks that are part of the same tree and that we haven't yet visited.
// The next iteration of this instruction-processing loop will immediately start
// work on the most recently pushed Ebb, and will eventually continue in this Ebb
// work on the most recently pushed Block, and will eventually continue in this Block
// when those new items have been removed from the stack.
match func.dfg.analyze_branch(inst) {
BranchInfo::NotABranch => (),
@@ -748,7 +749,7 @@ impl RedundantReloadRemover {
}
}
} else {
// We've come to the end of the current work-item (Ebb). We'll already have
// We've come to the end of the current work-item (Block). We'll already have
// processed the fallthrough/continuation/whatever for it using the logic above.
// Pop it off the stack and resume work on its parent.
self.processing_stack.pop();
@@ -765,11 +766,11 @@ impl RedundantReloadRemover {
pub fn new() -> Self {
Self {
num_regunits: None,
num_preds_per_ebb: PrimaryMap::<Ebb, ZeroOneOrMany>::with_capacity(8),
discovery_stack: Vec::<Ebb>::with_capacity(16),
nodes_in_tree: EntitySet::<Ebb>::new(),
num_preds_per_block: PrimaryMap::<Block, ZeroOneOrMany>::with_capacity(8),
discovery_stack: Vec::<Block>::with_capacity(16),
nodes_in_tree: EntitySet::<Block>::new(),
processing_stack: Vec::<ProcessingStackElem>::with_capacity(8),
nodes_already_visited: EntitySet::<Ebb>::new(),
nodes_already_visited: EntitySet::<Block>::new(),
}
}
@@ -779,7 +780,7 @@ impl RedundantReloadRemover {
}
fn clear_for_new_function(&mut self) {
self.num_preds_per_ebb.clear();
self.num_preds_per_block.clear();
self.clear_for_new_tree();
}
@@ -798,19 +799,19 @@ impl RedundantReloadRemover {
isa: &dyn TargetIsa,
cfg: &ControlFlowGraph,
) {
// Fail in an obvious way if there are more than (2^32)-1 Ebbs in this function.
let num_ebbs: u32 = func.dfg.num_ebbs().try_into().unwrap();
// Fail in an obvious way if there are more than (2^32)-1 Blocks in this function.
let num_blocks: u32 = func.dfg.num_blocks().try_into().unwrap();
// Clear out per-tree state.
self.clear_for_new_function();
// Create a PrimaryMap that summarises the number of predecessors for each block, as 0, 1
// or "many", and that also claims the entry block as having "many" predecessors.
self.num_preds_per_ebb.clear();
self.num_preds_per_ebb.reserve(num_ebbs as usize);
self.num_preds_per_block.clear();
self.num_preds_per_block.reserve(num_blocks as usize);
for i in 0..num_ebbs {
let mut pi = cfg.pred_iter(Ebb::from_u32(i));
for i in 0..num_blocks {
let mut pi = cfg.pred_iter(Block::from_u32(i));
let mut n_pi = ZeroOneOrMany::Zero;
if pi.next().is_some() {
n_pi = ZeroOneOrMany::One;
@@ -819,24 +820,24 @@ impl RedundantReloadRemover {
// We don't care if there are more than two preds, so stop counting now.
}
}
self.num_preds_per_ebb.push(n_pi);
self.num_preds_per_block.push(n_pi);
}
debug_assert!(self.num_preds_per_ebb.len() == num_ebbs as usize);
debug_assert!(self.num_preds_per_block.len() == num_blocks as usize);
// The entry block must be the root of some tree, so set up the state to reflect that.
let entry_ebb = func
let entry_block = func
.layout
.entry_block()
.expect("do_redundant_fill_removal_on_function: entry ebb unknown");
debug_assert!(self.num_preds_per_ebb[entry_ebb] == ZeroOneOrMany::Zero);
self.num_preds_per_ebb[entry_ebb] = ZeroOneOrMany::Many;
.expect("do_redundant_fill_removal_on_function: entry block unknown");
debug_assert!(self.num_preds_per_block[entry_block] == ZeroOneOrMany::Zero);
self.num_preds_per_block[entry_block] = ZeroOneOrMany::Many;
// Now build and process trees.
for root_ix in 0..self.num_preds_per_ebb.len() {
let root = Ebb::from_u32(root_ix as u32);
for root_ix in 0..self.num_preds_per_block.len() {
let root = Block::from_u32(root_ix as u32);
// Build a tree for each node that has two or more preds, and ignore all other nodes.
if self.num_preds_per_ebb[root] != ZeroOneOrMany::Many {
if self.num_preds_per_block[root] != ZeroOneOrMany::Many {
continue;
}
@@ -846,7 +847,7 @@ impl RedundantReloadRemover {
// Discovery phase: build the tree, as `root` and `self.nodes_in_tree`.
self.add_nodes_to_tree(cfg, root);
debug_assert!(self.nodes_in_tree.cardinality() > 0);
debug_assert!(self.num_preds_per_ebb[root] == ZeroOneOrMany::Many);
debug_assert!(self.num_preds_per_block[root] == ZeroOneOrMany::Many);
// Processing phase: do redundant-reload-removal.
self.process_tree(func, reginfo, isa, root);

View File

@@ -7,7 +7,7 @@ use alloc::vec::Vec;
use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList};
use crate::ir::{Block, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList};
use crate::isa::TargetIsa;
use crate::topo_order::TopoOrder;
@@ -43,12 +43,12 @@ struct Context<'a> {
impl<'a> Context<'a> {
fn run(&mut self) {
// Any ebb order will do.
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
// Any block order will do.
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
// Branches can only be at the last or second to last position in a basic
// block.
self.cur.goto_last_inst(ebb);
self.cur.goto_last_inst(block);
let terminator_inst = self.cur.current_inst().expect("terminator");
if let Some(inst) = self.cur.prev_inst() {
let opcode = self.cur.func.dfg[inst].opcode();
@@ -80,38 +80,38 @@ impl<'a> Context<'a> {
// If there are any parameters, split the edge.
if self.should_split_edge(target) {
// Create the block the branch will jump to.
let new_ebb = self.cur.func.dfg.make_ebb();
let new_block = self.cur.func.dfg.make_block();
// Insert the new block before the destination, such that it can fallthrough in the
// target block.
assert_ne!(Some(target), self.cur.layout().entry_block());
self.cur.layout_mut().insert_ebb(new_ebb, target);
self.cur.layout_mut().insert_block(new_block, target);
self.has_new_blocks = true;
// Extract the arguments of the branch instruction, split the Ebb parameters and the
// Extract the arguments of the branch instruction, split the Block parameters and the
// branch arguments
let num_fixed = opcode.constraints().num_fixed_value_arguments();
let dfg = &mut self.cur.func.dfg;
let old_args: Vec<_> = {
let args = dfg[branch].take_value_list().expect("ebb parameters");
let args = dfg[branch].take_value_list().expect("block parameters");
args.as_slice(&dfg.value_lists).iter().copied().collect()
};
let (branch_args, ebb_params) = old_args.split_at(num_fixed);
let (branch_args, block_params) = old_args.split_at(num_fixed);
// Replace the branch destination by the new Ebb created with no parameters, and restore
// the branch arguments, without the original Ebb parameters.
// Replace the branch destination by the new Block created with no parameters, and restore
// the branch arguments, without the original Block parameters.
{
let branch_args = ValueList::from_slice(branch_args, &mut dfg.value_lists);
let data = &mut dfg[branch];
*data.branch_destination_mut().expect("branch") = new_ebb;
*data.branch_destination_mut().expect("branch") = new_block;
data.put_value_list(branch_args);
}
let ok = self.cur.func.update_encoding(branch, self.cur.isa).is_ok();
debug_assert!(ok);
// Insert a jump to the original target with its arguments into the new block.
self.cur.goto_first_insertion_point(new_ebb);
self.cur.ins().jump(target, ebb_params);
self.cur.goto_first_insertion_point(new_block);
self.cur.ins().jump(target, block_params);
// Reset the cursor to point to the branch.
self.cur.goto_inst(branch);
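
A toy model of the rewrite may help; `Branch` and the integer block ids below are invented for illustration and are not the real `DataFlowGraph` API:

```rust
/// A branch in a toy IR: a target block plus the value arguments it passes.
struct Branch {
    target: usize,
    args: Vec<u32>,
}

/// Split the edge `branch -> branch.target`: retarget `branch` at the freshly
/// created `new_block` (inserted just before the old target), and return the
/// unconditional jump that `new_block` must end with, carrying the original
/// arguments on to the original target.
fn split_edge(branch: &mut Branch, new_block: usize) -> Branch {
    let args = std::mem::take(&mut branch.args);
    let old_target = std::mem::replace(&mut branch.target, new_block);
    Branch {
        target: old_target,
        args,
    }
}
```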
@@ -122,7 +122,7 @@ impl<'a> Context<'a> {
let inst_data = &self.cur.func.dfg[inst];
let opcode = inst_data.opcode();
if opcode != Opcode::Jump && opcode != Opcode::Fallthrough {
// This opcode is ignored as it does not have any EBB parameters.
// This opcode is ignored as it does not have any block parameters.
if opcode != Opcode::IndirectJumpTableBr {
debug_assert!(!opcode.is_branch())
}
@@ -141,23 +141,23 @@ impl<'a> Context<'a> {
// If there are any parameters, split the edge.
if self.should_split_edge(*target) {
// Create the block the branch will jump to.
let new_ebb = self.cur.func.dfg.make_ebb();
let new_block = self.cur.func.dfg.make_block();
self.has_new_blocks = true;
// Split the current block before its terminator, and insert a new jump instruction to
// jump to it.
let jump = self.cur.ins().jump(new_ebb, &[]);
self.cur.insert_ebb(new_ebb);
let jump = self.cur.ins().jump(new_block, &[]);
self.cur.insert_block(new_block);
// Reset the cursor to point to new terminator of the old ebb.
// Reset the cursor to point to the new terminator of the old block.
self.cur.goto_inst(jump);
}
}
/// Returns whether we should introduce a new branch.
fn should_split_edge(&self, target: Ebb) -> bool {
fn should_split_edge(&self, target: Block) -> bool {
// We should split the edge if the target has any parameters.
if !self.cur.func.dfg.ebb_params(target).is_empty() {
if !self.cur.func.dfg.block_params(target).is_empty() {
return true;
};

View File

@@ -2,16 +2,16 @@
//!
//! Conventional SSA (CSSA) form is a subset of SSA form where any (transitively) phi-related
//! values do not interfere. We construct CSSA by building virtual registers that are as large as
//! possible and inserting copies where necessary such that all argument values passed to an EBB
//! parameter will belong to the same virtual register as the EBB parameter value itself.
//! possible and inserting copies where necessary such that all argument values passed to a block
//! parameter will belong to the same virtual register as the block parameter value itself.
use crate::cursor::{Cursor, EncCursor};
use crate::dbg::DisplayList;
use crate::dominator_tree::{DominatorTree, DominatorTreePreorder};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::fx::FxHashMap;
use crate::ir::{self, InstBuilder, ProgramOrder};
use crate::ir::{Ebb, ExpandedProgramPoint, Function, Inst, Value};
use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Value};
use crate::isa::{EncInfo, TargetIsa};
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liveness::Liveness;
@@ -40,8 +40,8 @@ use log::debug;
//
// Phase 1: Union-find.
//
// We use the union-find support in `VirtRegs` to build virtual registers such that EBB parameter
// values always belong to the same virtual register as their corresponding EBB arguments at the
// We use the union-find support in `VirtRegs` to build virtual registers such that block parameter
// values always belong to the same virtual register as their corresponding block arguments at the
// predecessor branches. Trivial interferences between parameter and argument value live ranges are
// detected and resolved before unioning congruence classes, but non-trivial interferences between
// values that end up in the same congruence class are possible.
@@ -135,8 +135,8 @@ impl Coalescing {
};
// Run phase 1 (union-find) of the coalescing algorithm on the current function.
for &ebb in domtree.cfg_postorder() {
context.union_find_ebb(ebb);
for &block in domtree.cfg_postorder() {
context.union_find_block(block);
}
context.finish_union_find();
@@ -147,114 +147,114 @@ impl Coalescing {
/// Phase 1: Union-find.
///
/// The two entry points for phase 1 are `union_find_ebb()` and `finish_union_find`.
/// The two entry points for phase 1 are `union_find_block()` and `finish_union_find`.
impl<'a> Context<'a> {
/// Run the union-find algorithm on the parameter values on `ebb`.
/// Run the union-find algorithm on the parameter values on `block`.
///
/// This ensure that all EBB parameters will belong to the same virtual register as their
/// This ensures that all block parameters will belong to the same virtual register as their
/// corresponding arguments at all predecessor branches.
pub fn union_find_ebb(&mut self, ebb: Ebb) {
let num_params = self.func.dfg.num_ebb_params(ebb);
pub fn union_find_block(&mut self, block: Block) {
let num_params = self.func.dfg.num_block_params(block);
if num_params == 0 {
return;
}
self.isolate_conflicting_params(ebb, num_params);
self.isolate_conflicting_params(block, num_params);
for i in 0..num_params {
self.union_pred_args(ebb, i);
self.union_pred_args(block, i);
}
}
// Identify EBB parameter values that are live at one of the predecessor branches.
// Identify block parameter values that are live at one of the predecessor branches.
//
// Such a parameter value will conflict with any argument value at the predecessor branch, so
// it must be isolated by inserting a copy.
fn isolate_conflicting_params(&mut self, ebb: Ebb, num_params: usize) {
debug_assert_eq!(num_params, self.func.dfg.num_ebb_params(ebb));
// The only way a parameter value can interfere with a predecessor branch is if the EBB is
fn isolate_conflicting_params(&mut self, block: Block, num_params: usize) {
debug_assert_eq!(num_params, self.func.dfg.num_block_params(block));
// The only way a parameter value can interfere with a predecessor branch is if the block is
// dominating the predecessor branch. That is, we are looking for loop back-edges.
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
// The quick pre-order dominance check is accurate because the EBB parameter is defined
// at the top of the EBB before any branches.
if !self.preorder.dominates(ebb, pred_ebb) {
// The quick pre-order dominance check is accurate because the block parameter is defined
// at the top of the block before any branches.
if !self.preorder.dominates(block, pred_block) {
continue;
}
debug!(
" - checking {} params at back-edge {}: {}",
num_params,
pred_ebb,
pred_block,
self.func.dfg.display_inst(pred_inst, self.isa)
);
// Now `pred_inst` is known to be a back-edge, so it is possible for parameter values
// to be live at the use.
for i in 0..num_params {
let param = self.func.dfg.ebb_params(ebb)[i];
if self.liveness[param].reaches_use(pred_inst, pred_ebb, &self.func.layout) {
self.isolate_param(ebb, param);
let param = self.func.dfg.block_params(block)[i];
if self.liveness[param].reaches_use(pred_inst, pred_block, &self.func.layout) {
self.isolate_param(block, param);
}
}
}
}
// Union EBB parameter value `num` with the corresponding EBB arguments on the predecessor
// Union block parameter value `num` with the corresponding block arguments on the predecessor
// branches.
//
// Detect cases where the argument value is live-in to `ebb` so it conflicts with any EBB
// Detect cases where the argument value is live-in to `block` so it conflicts with any block
// parameter. Isolate the argument in those cases before unioning it with the parameter value.
fn union_pred_args(&mut self, ebb: Ebb, argnum: usize) {
let param = self.func.dfg.ebb_params(ebb)[argnum];
fn union_pred_args(&mut self, block: Block, argnum: usize) {
let param = self.func.dfg.block_params(block)[argnum];
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
let arg = self.func.dfg.inst_variable_args(pred_inst)[argnum];
// Never coalesce incoming function parameters on the stack. These parameters are
// pre-spilled, and the rest of the virtual register would be forced to spill to the
// `incoming_arg` stack slot too.
if let ir::ValueDef::Param(def_ebb, def_num) = self.func.dfg.value_def(arg) {
if Some(def_ebb) == self.func.layout.entry_block()
if let ir::ValueDef::Param(def_block, def_num) = self.func.dfg.value_def(arg) {
if Some(def_block) == self.func.layout.entry_block()
&& self.func.signature.params[def_num].location.is_stack()
{
debug!("-> isolating function stack parameter {}", arg);
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs.union(param, new_arg);
continue;
}
}
// Check for basic interference: If `arg` overlaps a value defined at the entry to
// `ebb`, it can never be used as an EBB argument.
// `block`, it can never be used as a block argument.
let interference = {
let lr = &self.liveness[arg];
// There are two ways the argument value can interfere with `ebb`:
// There are two ways the argument value can interfere with `block`:
//
// 1. It is defined in a dominating EBB and live-in to `ebb`.
// 2. If is itself a parameter value for `ebb`. This case should already have been
// 1. It is defined in a dominating block and live-in to `block`.
// 2. It is itself a parameter value for `block`. This case should already have been
// eliminated by `isolate_conflicting_params()`.
debug_assert!(
lr.def() != ebb.into(),
lr.def() != block.into(),
"{} parameter {} was missed by isolate_conflicting_params()",
ebb,
block,
arg
);
// The only other possibility is that `arg` is live-in to `ebb`.
lr.is_livein(ebb, &self.func.layout)
// The only other possibility is that `arg` is live-in to `block`.
lr.is_livein(block, &self.func.layout)
};
if interference {
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs.union(param, new_arg);
} else {
self.virtregs.union(param, arg);
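
Stripped to its core, phase 1 is plain union-find over values. A minimal sketch, assuming values are small integers and that the isolation steps above have already removed interfering pairs (the real `VirtRegs` adds congruence-class bookkeeping on top of this):

```rust
struct UnionFind {
    parent: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        Self { parent: (0..n).collect() }
    }

    // Find the representative of `x`, compressing paths along the way.
    fn find(&mut self, x: usize) -> usize {
        if self.parent[x] != x {
            let root = self.find(self.parent[x]);
            self.parent[x] = root;
        }
        self.parent[x]
    }

    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra != rb {
            self.parent[ra] = rb;
        }
    }
}

// Union each block parameter with the corresponding argument at every
// predecessor branch, so all of them end up in one congruence class.
fn union_block_params(uf: &mut UnionFind, params: &[usize], pred_args: &[Vec<usize>]) {
    for (i, &param) in params.iter().enumerate() {
        for args in pred_args {
            uf.union(param, args[i]);
        }
    }
}
```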
@@ -262,31 +262,31 @@ impl<'a> Context<'a> {
}
}
// Isolate EBB parameter value `param` on `ebb`.
// Isolate block parameter value `param` on `block`.
//
// When `param=v10`:
//
// ebb1(v10: i32):
// block1(v10: i32):
// foo
//
// becomes:
//
// ebb1(v11: i32):
// block1(v11: i32):
// v10 = copy v11
// foo
//
// This function inserts the copy and updates the live ranges of the old and new parameter
// values. Returns the new parameter value.
fn isolate_param(&mut self, ebb: Ebb, param: Value) -> Value {
fn isolate_param(&mut self, block: Block, param: Value) -> Value {
debug_assert_eq!(
self.func.dfg.value_def(param).pp(),
ExpandedProgramPoint::Ebb(ebb)
ExpandedProgramPoint::Block(block)
);
let ty = self.func.dfg.value_type(param);
let new_val = self.func.dfg.replace_ebb_param(param, ty);
let new_val = self.func.dfg.replace_block_param(param, ty);
// Insert a copy instruction at the top of `ebb`.
let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(ebb);
// Insert a copy instruction at the top of `block`.
let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(block);
if let Some(inst) = pos.current_inst() {
pos.use_srcloc(inst);
}
@@ -297,7 +297,7 @@ impl<'a> Context<'a> {
debug!(
"-> inserted {}, following {}({}: {})",
pos.display_inst(inst),
ebb,
block,
new_val,
ty
);
@@ -311,27 +311,27 @@ impl<'a> Context<'a> {
.expect("Bad copy encoding")
.outs[0],
);
self.liveness.create_dead(new_val, ebb, affinity);
self.liveness.create_dead(new_val, block, affinity);
self.liveness
.extend_locally(new_val, ebb, inst, &pos.func.layout);
.extend_locally(new_val, block, inst, &pos.func.layout);
new_val
}
// Isolate the EBB argument `pred_val` from the predecessor `(pred_ebb, pred_inst)`.
// Isolate the block argument `pred_val` from the predecessor `(pred_block, pred_inst)`.
//
// It is assumed that `pred_inst` is a branch instruction in `pred_ebb` whose `argnum`'th EBB
// argument is `pred_val`. Since the argument value interferes with the corresponding EBB
// It is assumed that `pred_inst` is a branch instruction in `pred_block` whose `argnum`'th block
// argument is `pred_val`. Since the argument value interferes with the corresponding block
// parameter at the destination, a copy is used instead:
//
// brnz v1, ebb2(v10)
// brnz v1, block2(v10)
//
// Becomes:
//
// v11 = copy v10
// brnz v1, ebb2(v11)
// brnz v1, block2(v11)
//
// This way the interference with the EBB parameter is avoided.
// This way the interference with the block parameter is avoided.
//
// A live range for the new value is created while the live range for `pred_val` is left
// unaltered.
@@ -339,7 +339,7 @@ impl<'a> Context<'a> {
// The new argument value is returned.
fn isolate_arg(
&mut self,
pred_ebb: Ebb,
pred_block: Block,
pred_inst: Inst,
argnum: usize,
pred_val: Value,
@@ -360,14 +360,14 @@ impl<'a> Context<'a> {
);
self.liveness.create_dead(copy, inst, affinity);
self.liveness
.extend_locally(copy, pred_ebb, pred_inst, &pos.func.layout);
.extend_locally(copy, pred_block, pred_inst, &pos.func.layout);
pos.func.dfg.inst_variable_args_mut(pred_inst)[argnum] = copy;
debug!(
"-> inserted {}, before {}: {}",
pos.display_inst(inst),
pred_ebb,
pred_block,
pos.display_inst(pred_inst)
);
@@ -377,7 +377,7 @@ impl<'a> Context<'a> {
/// Finish the union-find part of the coalescing algorithm.
///
/// This builds the initial set of virtual registers as the transitive/reflexive/symmetric
/// closure of the relation formed by EBB parameter-argument pairs found by `union_find_ebb()`.
/// closure of the relation formed by block parameter-argument pairs found by `union_find_block()`.
fn finish_union_find(&mut self) {
self.virtregs.finish_union_find(None);
debug!("After union-find phase:{}", self.virtregs);
@@ -430,7 +430,7 @@ impl<'a> Context<'a> {
// Check for interference between `parent` and `value`. Since `parent` dominates
// `value`, we only have to check if it overlaps the definition.
if self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout) {
if self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout) {
// The two values are interfering, so they can't be in the same virtual register.
debug!("-> interference: {} overlaps def of {}", parent, value);
return false;
@@ -470,9 +470,9 @@ impl<'a> Context<'a> {
}
}
/// Merge EBB parameter value `param` with virtual registers at its predecessors.
/// Merge block parameter value `param` with virtual registers at its predecessors.
fn merge_param(&mut self, param: Value) {
let (ebb, argnum) = match self.func.dfg.value_def(param) {
let (block, argnum) = match self.func.dfg.value_def(param) {
ir::ValueDef::Param(e, n) => (e, n),
ir::ValueDef::Result(_, _) => panic!("Expected parameter"),
};
@@ -493,12 +493,12 @@ impl<'a> Context<'a> {
// not loop backedges.
debug_assert!(self.predecessors.is_empty());
debug_assert!(self.backedges.is_empty());
for BasicBlock {
ebb: pred_ebb,
for BlockPredecessor {
block: pred_block,
inst: pred_inst,
} in self.cfg.pred_iter(ebb)
} in self.cfg.pred_iter(block)
{
if self.preorder.dominates(ebb, pred_ebb) {
if self.preorder.dominates(block, pred_block) {
self.backedges.push(pred_inst);
} else {
self.predecessors.push(pred_inst);
@@ -522,8 +522,8 @@ impl<'a> Context<'a> {
}
// Can't merge because of interference. Insert a copy instead.
let pred_ebb = self.func.layout.pp_ebb(pred_inst);
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
let pred_block = self.func.layout.pp_block(pred_inst);
let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg);
self.virtregs
.insert_single(param, new_arg, self.func, self.preorder);
}
@@ -616,12 +616,12 @@ impl<'a> Context<'a> {
// Check if the parent value interferes with the virtual copy.
let inst = node.def.unwrap_inst();
if node.set_id != parent.set_id
&& self.liveness[parent.value].reaches_use(inst, node.ebb, &self.func.layout)
&& self.liveness[parent.value].reaches_use(inst, node.block, &self.func.layout)
{
debug!(
" - interference: {} overlaps vcopy at {}:{}",
parent,
node.ebb,
node.block,
self.func.dfg.display_inst(inst, self.isa)
);
return false;
@@ -640,7 +640,7 @@ impl<'a> Context<'a> {
// Both node and parent are values, so check for interference.
debug_assert!(node.is_value() && parent.is_value());
if node.set_id != parent.set_id
&& self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout)
&& self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout)
{
// The two values are interfering.
debug!(" - interference: {} overlaps def of {}", parent, node.value);
@@ -663,7 +663,7 @@ impl<'a> Context<'a> {
///
/// The idea of a dominator forest was introduced on the Budimlic paper and the linear stack
/// representation in the Boissinot paper. Our version of the linear stack is slightly modified
/// because we have a pre-order of the dominator tree at the EBB granularity, not basic block
/// because we have a pre-order of the dominator tree at the block granularity, not basic block
/// granularity.
///
/// Values are pushed in dominator tree pre-order of their definitions, and for each value pushed,
@@ -673,7 +673,7 @@ struct DomForest {
// Stack representing the rightmost edge of the dominator forest so far, ending in the last
// element of `values`.
//
// At all times, the EBB of each element in the stack dominates the EBB of the next one.
// At all times, the block of each element in the stack dominates the block of the next one.
stack: Vec<Node>,
}
@@ -683,8 +683,8 @@ struct DomForest {
struct Node {
/// The program point where the live range is defined.
def: ExpandedProgramPoint,
/// EBB containing `def`.
ebb: Ebb,
/// Block containing `def`.
block: Block,
/// Is this a virtual copy or a value?
is_vcopy: bool,
/// Set identifier.
@@ -698,10 +698,10 @@ impl Node {
/// Create a node representing `value`.
pub fn value(value: Value, set_id: u8, func: &Function) -> Self {
let def = func.dfg.value_def(value).pp();
let ebb = func.layout.pp_ebb(def);
let block = func.layout.pp_block(def);
Self {
def,
ebb,
block,
is_vcopy: false,
set_id,
value,
@@ -711,10 +711,10 @@ impl Node {
/// Create a node representing a virtual copy.
pub fn vcopy(branch: Inst, value: Value, set_id: u8, func: &Function) -> Self {
let def = branch.into();
let ebb = func.layout.pp_ebb(def);
let block = func.layout.pp_block(def);
Self {
def,
ebb,
block,
is_vcopy: true,
set_id,
value,
@@ -730,9 +730,9 @@ impl Node {
impl fmt::Display for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_vcopy {
write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.ebb)
write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.block)
} else {
write!(f, "{}:{}@{}", self.set_id, self.value, self.ebb)
write!(f, "{}:{}@{}", self.set_id, self.value, self.block)
}
}
}
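
The stack discipline itself is independent of the IR types, so it can be sketched with a hypothetical pre-order dominance oracle (the real `push_node` additionally refines the answer with the per-instruction program-point checks shown below):

```rust
/// Push `node` onto the rightmost-edge stack, first popping entries that do
/// not dominate it. Returns the nearest entry left on the stack that
/// dominates `node`, i.e. the candidate for an interference check.
fn dom_forest_push(
    stack: &mut Vec<u32>,
    node: u32,
    dominates: impl Fn(u32, u32) -> bool,
) -> Option<u32> {
    while let Some(&top) = stack.last() {
        if dominates(top, node) {
            stack.push(node);
            return Some(top);
        }
        stack.pop();
    }
    stack.push(node);
    None
}
```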
@@ -760,16 +760,16 @@ impl DomForest {
preorder: &DominatorTreePreorder,
) -> Option<Node> {
// The stack contains the current sequence of dominating defs. Pop elements until we
// find one whose EBB dominates `node.ebb`.
// find one whose block dominates `node.block`.
while let Some(top) = self.stack.pop() {
if preorder.dominates(top.ebb, node.ebb) {
if preorder.dominates(top.block, node.block) {
// This is the right insertion spot for `node`.
self.stack.push(top);
self.stack.push(node);
// We know here that `top.ebb` dominates `node.ebb`, and thus `node.def`. This does
// We know here that `top.block` dominates `node.block`, and thus `node.def`. This does
// not necessarily mean that `top.def` dominates `node.def`, though. The `top.def`
// program point may be below the last branch in `top.ebb` that dominates
// program point may be below the last branch in `top.block` that dominates
// `node.def`.
//
// We do know, though, that if there is a nearest value dominating `node.def`, it
@@ -777,16 +777,16 @@ impl DomForest {
// dominates.
let mut last_dom = node.def;
for &n in self.stack.iter().rev().skip(1) {
// If the node is defined at the EBB header, it does in fact dominate
// If the node is defined at the block header, it does in fact dominate
// everything else pushed on the stack.
let def_inst = match n.def {
ExpandedProgramPoint::Ebb(_) => return Some(n),
ExpandedProgramPoint::Block(_) => return Some(n),
ExpandedProgramPoint::Inst(i) => i,
};
// We need to find the last program point in `n.ebb` to dominate `node.def`.
last_dom = match domtree.last_dominator(n.ebb, last_dom, &func.layout) {
None => n.ebb.into(),
// We need to find the last program point in `n.block` to dominate `node.def`.
last_dom = match domtree.last_dominator(n.block, last_dom, &func.layout) {
None => n.block.into(),
Some(inst) => {
if func.layout.cmp(def_inst, inst) != cmp::Ordering::Greater {
return Some(n);
@@ -816,18 +816,18 @@ impl DomForest {
/// When building a full virtual register at once, like phase 1 does with union-find, it is good
/// enough to check for interference between the values in the full virtual register like
/// `check_vreg()` does. However, in phase 2 we are doing pairwise merges of partial virtual
/// registers that don't represent the full transitive closure of the EBB argument-parameter
/// registers that don't represent the full transitive closure of the block argument-parameter
/// relation. This means that just checking for interference between values is inadequate.
///
/// Example:
///
/// v1 = iconst.i32 1
/// brnz v10, ebb1(v1)
/// brnz v10, block1(v1)
/// v2 = iconst.i32 2
/// brnz v11, ebb1(v2)
/// brnz v11, block1(v2)
/// return v1
///
/// ebb1(v3: i32):
/// block1(v3: i32):
/// v4 = iadd v3, v1
///
/// With just value interference checking, we could build the virtual register [v3, v1] since those
@@ -835,13 +835,13 @@ impl DomForest {
/// interfere. However, we can't resolve that interference either by inserting a copy:
///
/// v1 = iconst.i32 1
/// brnz v10, ebb1(v1)
/// brnz v10, block1(v1)
/// v2 = iconst.i32 2
/// v20 = copy v2 <-- new value
/// brnz v11, ebb1(v20)
/// brnz v11, block1(v20)
/// return v1
///
/// ebb1(v3: i32):
/// block1(v3: i32):
/// v4 = iadd v3, v1
///
/// The new value v20 still interferes with v1 because v1 is live across the "brnz v11" branch. We
@@ -851,32 +851,32 @@ impl DomForest {
/// instructions, then attempting to delete the copies. This is quite expensive because it involves
/// creating a large number of copies and values.
///
/// We'll detect this form of interference with *virtual copies*: Each EBB parameter value that
/// hasn't yet been fully merged with its EBB argument values is given a set of virtual copies at
/// We'll detect this form of interference with *virtual copies*: Each block parameter value that
/// hasn't yet been fully merged with its block argument values is given a set of virtual copies at
/// the predecessors. Any candidate value to be merged is checked for interference against both the
/// virtual register and the virtual copies.
///
/// In the general case, we're checking if two virtual registers can be merged, and both can
/// contain incomplete EBB parameter values with associated virtual copies.
/// contain incomplete block parameter values with associated virtual copies.
///
/// The `VirtualCopies` struct represents a set of incomplete parameters and their associated
/// virtual copies. Given two virtual registers, it can produce an ordered sequence of nodes
/// representing the virtual copies in both vregs.
struct VirtualCopies {
// Incomplete EBB parameters. These don't need to belong to the same virtual register.
// Incomplete block parameters. These don't need to belong to the same virtual register.
params: Vec<Value>,
// Set of `(branch, destination)` pairs. These are all the predecessor branches for the EBBs
// Set of `(branch, destination)` pairs. These are all the predecessor branches for the blocks
// whose parameters can be found in `params`.
//
// Ordered by dominator tree pre-order of the branch instructions.
branches: Vec<(Inst, Ebb)>,
branches: Vec<(Inst, Block)>,
// Filter for the currently active node iterator.
//
// An ebb => (set_id, num) entry means that branches to `ebb` are active in `set_id` with
// A block => (set_id, num) entry means that branches to `block` are active in `set_id` with
// branch argument number `num`.
filter: FxHashMap<Ebb, (u8, usize)>,
filter: FxHashMap<Block, (u8, usize)>,
}
impl VirtualCopies {
@@ -901,7 +901,7 @@ impl VirtualCopies {
///
/// The values are assumed to be in domtree pre-order.
///
/// This will extract the EBB parameter values and associate virtual copies all of them.
/// This will extract the block parameter values and associate virtual copies with all of them.
pub fn initialize(
&mut self,
values: &[Value],
@@ -911,29 +911,29 @@ impl VirtualCopies {
) {
self.clear();
let mut last_ebb = None;
let mut last_block = None;
for &val in values {
if let ir::ValueDef::Param(ebb, _) = func.dfg.value_def(val) {
if let ir::ValueDef::Param(block, _) = func.dfg.value_def(val) {
self.params.push(val);
// We may have multiple parameters from the same EBB, but we only need to collect
// We may have multiple parameters from the same block, but we only need to collect
// predecessors once. Also verify the ordering of values.
if let Some(last) = last_ebb {
match preorder.pre_cmp_ebb(last, ebb) {
if let Some(last) = last_block {
match preorder.pre_cmp_block(last, block) {
cmp::Ordering::Less => {}
cmp::Ordering::Equal => continue,
cmp::Ordering::Greater => panic!("values in wrong order"),
}
}
// This EBB hasn't been seen before.
for BasicBlock {
// This block hasn't been seen before.
for BlockPredecessor {
inst: pred_inst, ..
} in cfg.pred_iter(ebb)
} in cfg.pred_iter(block)
{
self.branches.push((pred_inst, ebb));
self.branches.push((pred_inst, block));
}
last_ebb = Some(ebb);
last_block = Some(block);
}
}
@@ -953,7 +953,7 @@ impl VirtualCopies {
debug_assert_eq!(popped, Some(param));
// The domtree pre-order in `self.params` guarantees that all parameters defined at the
// same EBB will be adjacent. This means we can see when all parameters at an EBB have been
// same block will be adjacent. This means we can see when all parameters at a block have been
// merged.
//
// We don't care about the last parameter - when that is merged we are done.
@@ -961,16 +961,16 @@ impl VirtualCopies {
None => return,
Some(x) => *x,
};
let ebb = func.dfg.value_def(param).unwrap_ebb();
if func.dfg.value_def(last).unwrap_ebb() == ebb {
// We're not done with `ebb` parameters yet.
let block = func.dfg.value_def(param).unwrap_block();
if func.dfg.value_def(last).unwrap_block() == block {
// We're not done with `block` parameters yet.
return;
}
// Alright, we know there are no remaining `ebb` parameters in `self.params`. This means we
// can get rid of the `ebb` predecessors in `self.branches`. We don't have to, the
// Alright, we know there are no remaining `block` parameters in `self.params`. This means we
// can get rid of the `block` predecessors in `self.branches`. We don't have to, the
// `VCopyIter` will just skip them, but this reduces its workload.
self.branches.retain(|&(_, dest)| dest != ebb);
self.branches.retain(|&(_, dest)| dest != block);
}
/// Set a filter for the virtual copy nodes we're generating.
@@ -991,28 +991,28 @@ impl VirtualCopies {
// removed from the back once they are fully merged. This means we can stop looking for
// parameters once we're beyond the last one.
let last_param = *self.params.last().expect("No more parameters");
let limit = func.dfg.value_def(last_param).unwrap_ebb();
let limit = func.dfg.value_def(last_param).unwrap_block();
for (set_id, repr) in reprs.iter().enumerate() {
let set_id = set_id as u8;
for &value in virtregs.congruence_class(repr) {
if let ir::ValueDef::Param(ebb, num) = func.dfg.value_def(value) {
if preorder.pre_cmp_ebb(ebb, limit) == cmp::Ordering::Greater {
if let ir::ValueDef::Param(block, num) = func.dfg.value_def(value) {
if preorder.pre_cmp_block(block, limit) == cmp::Ordering::Greater {
// Stop once we're outside the bounds of `self.params`.
break;
}
self.filter.insert(ebb, (set_id, num));
self.filter.insert(block, (set_id, num));
}
}
}
}
/// Look up the set_id and argument number for `ebb` in the current filter.
/// Look up the set_id and argument number for `block` in the current filter.
///
/// Returns `None` if none of the currently active parameters are defined at `ebb`. Otherwise
/// returns `(set_id, argnum)` for an active parameter defined at `ebb`.
fn lookup(&self, ebb: Ebb) -> Option<(u8, usize)> {
self.filter.get(&ebb).cloned()
/// Returns `None` if none of the currently active parameters are defined at `block`. Otherwise
/// returns `(set_id, argnum)` for an active parameter defined at `block`.
fn lookup(&self, block: Block) -> Option<(u8, usize)> {
self.filter.get(&block).cloned()
}
/// Get an iterator of dom-forest nodes corresponding to the current filter.
@@ -1032,7 +1032,7 @@ impl VirtualCopies {
struct VCopyIter<'a> {
func: &'a Function,
vcopies: &'a VirtualCopies,
branches: slice::Iter<'a, (Inst, Ebb)>,
branches: slice::Iter<'a, (Inst, Block)>,
}
impl<'a> Iterator for VCopyIter<'a> {
@@ -1090,7 +1090,7 @@ where
(Some(a), Some(b)) => {
let layout = self.layout;
self.preorder
.pre_cmp_ebb(a.ebb, b.ebb)
.pre_cmp_block(a.block, b.block)
.then_with(|| layout.cmp(a.def, b.def))
}
(Some(_), None) => cmp::Ordering::Less,

View File

@@ -24,8 +24,8 @@
//! a register.
//!
//! 5. The code must be in Conventional SSA form. Among other things, this means that values passed
//! as arguments when branching to an EBB must belong to the same virtual register as the
//! corresponding EBB argument value.
//! as arguments when branching to a block must belong to the same virtual register as the
//! corresponding block argument value.
//!
//! # Iteration order
//!
@@ -35,10 +35,10 @@
//! defined by the instruction and only consider the colors of other values that are live at the
//! instruction.
//!
//! The first time we see a branch to an EBB, the EBB's argument values are colored to match the
//! The first time we see a branch to a block, the block's argument values are colored to match the
//! registers currently holding branch argument values passed to the predecessor branch. By
//! visiting EBBs in a CFG topological order, we guarantee that at least one predecessor branch has
//! been visited before the destination EBB. Therefore, the EBB's arguments are already colored.
//! visiting blocks in a CFG topological order, we guarantee that at least one predecessor branch has
//! been visited before the destination block. Therefore, the block's arguments are already colored.
//!
//! The exception is the entry block whose arguments are colored from the ABI requirements.
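
That guarantee is a property of reverse post-order itself. A sketch over a toy adjacency-list CFG, assuming block 0 is the entry:

```rust
/// Compute a reverse post-order of the blocks reachable from block 0. For
/// every non-entry block, some predecessor appears earlier in the result,
/// which is exactly the property the coloring pass relies on.
fn reverse_postorder(succs: &[Vec<usize>]) -> Vec<usize> {
    let mut post = Vec::new();
    let mut seen = vec![false; succs.len()];
    // Iterative DFS; the `true` marker means "children already expanded".
    let mut stack = vec![(0usize, false)];
    while let Some((node, done)) = stack.pop() {
        if done {
            post.push(node);
        } else if !seen[node] {
            seen[node] = true;
            stack.push((node, true));
            for &s in &succs[node] {
                stack.push((s, false));
            }
        }
    }
    post.reverse();
    post
}
```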
@@ -46,7 +46,7 @@ use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::{ArgumentLoc, InstBuilder, ValueDef};
use crate::ir::{Ebb, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
use crate::ir::{Block, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
use crate::isa::{regs_overlap, RegClass, RegInfo, RegUnit};
use crate::isa::{ConstraintKind, EncInfo, OperandConstraint, RecipeConstraints, TargetIsa};
use crate::packed_option::PackedOption;
@@ -168,20 +168,20 @@ impl<'a> Context<'a> {
.resize(self.cur.func.dfg.num_values());
// Visit blocks in reverse post-order. We need to ensure that at least one predecessor has
// been visited before each EBB. That guarantees that the EBB arguments have been colored.
for &ebb in self.domtree.cfg_postorder().iter().rev() {
self.visit_ebb(ebb, tracker);
// been visited before each block. That guarantees that the block arguments have been colored.
for &block in self.domtree.cfg_postorder().iter().rev() {
self.visit_block(block, tracker);
}
}
/// Visit `ebb`, assuming that the immediate dominator has already been visited.
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Coloring {}:", ebb);
let mut regs = self.visit_ebb_header(ebb, tracker);
/// Visit `block`, assuming that the immediate dominator has already been visited.
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Coloring {}:", block);
let mut regs = self.visit_block_header(block, tracker);
tracker.drop_dead_params();
// Now go through the instructions in `ebb` and color the values they define.
self.cur.goto_top(ebb);
// Now go through the instructions in `block` and color the values they define.
self.cur.goto_top(block);
while let Some(inst) = self.cur.next_inst() {
self.cur.use_srcloc(inst);
let opcode = self.cur.func.dfg[inst].opcode();
@@ -204,7 +204,7 @@ impl<'a> Context<'a> {
tracker.drop_dead(inst);
// We are not able to insert any regmove for diversion or un-diversion after the first
// branch. Instead, we record the diversion to be restored at the entry of the next EBB,
// branch. Instead, we record the diversion to be restored at the entry of the next block,
// which should have a single predecessor.
if opcode.is_branch() {
// The next instruction is necessarily an unconditional branch.
@@ -221,15 +221,15 @@ impl<'a> Context<'a> {
"unexpected instruction {} after a conditional branch",
self.cur.display_inst(branch)
),
SingleDest(ebb, _) => ebb,
SingleDest(block, _) => block,
};
// We have a single branch with a single target, and an EBB with a single
// predecessor. Thus we can forward the diversion set to the next EBB.
// We have a single branch with a single target, and a block with a single
// predecessor. Thus we can forward the diversion set to the next block.
if self.cfg.pred_iter(target).count() == 1 {
// Transfer the diversion to the next EBB.
// Transfer the diversion to the next block.
self.divert
.save_for_ebb(&mut self.cur.func.entry_diversions, target);
.save_for_block(&mut self.cur.func.entry_diversions, target);
debug!(
"Set entry-diversion for {} to\n {}",
target,
@@ -253,13 +253,17 @@ impl<'a> Context<'a> {
}
}
/// Visit the `ebb` header.
/// Visit the `block` header.
///
/// Initialize the set of live registers and color the arguments to `ebb`.
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) -> AvailableRegs {
// Reposition the live value tracker and deal with the EBB arguments.
tracker.ebb_top(
ebb,
/// Initialize the set of live registers and color the arguments to `block`.
fn visit_block_header(
&mut self,
block: Block,
tracker: &mut LiveValueTracker,
) -> AvailableRegs {
// Reposition the live value tracker and deal with the block arguments.
tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
@@ -268,18 +272,18 @@ impl<'a> Context<'a> {
// Copy the content of the registered diversions to be reused at the
// entry of this basic block.
self.divert.at_ebb(&self.cur.func.entry_diversions, ebb);
self.divert.at_block(&self.cur.func.entry_diversions, block);
debug!(
"Start {} with entry-diversion set to\n {}",
ebb,
block,
self.divert.display(&self.reginfo)
);
if self.cur.func.layout.entry_block() == Some(ebb) {
if self.cur.func.layout.entry_block() == Some(block) {
// Parameters on the entry block have ABI constraints.
self.color_entry_params(tracker.live())
} else {
// The live-ins and parameters of a non-entry EBB have already been assigned a register.
// The live-ins and parameters of a non-entry block have already been assigned a register.
// Reconstruct the allocatable set.
self.livein_regs(tracker.live())
}
@@ -288,7 +292,7 @@ impl<'a> Context<'a> {
/// Initialize a set of allocatable registers from the values that are live-in to a block.
/// These values must already be colored when the dominating blocks were processed.
///
/// Also process the EBB arguments which were colored when the first predecessor branch was
/// Also process the block arguments which were colored when the first predecessor branch was
/// encountered.
fn livein_regs(&self, live: &[LiveValue]) -> AvailableRegs {
// Start from the registers that are actually usable. We don't want to include any reserved
@@ -428,7 +432,7 @@ impl<'a> Context<'a> {
regs.input.display(&self.reginfo),
);
// EBB whose arguments should be colored to match the current branch instruction's
// Block whose arguments should be colored to match the current branch instruction's
// arguments.
let mut color_dest_args = None;
@@ -446,10 +450,10 @@ impl<'a> Context<'a> {
self.program_input_abi(inst, AbiParams::Returns);
} else if self.cur.func.dfg[inst].opcode().is_branch() {
// This is a branch, so we need to make sure that globally live values are in their
// global registers. For EBBs that take arguments, we also need to place the argument
// global registers. For blocks that take arguments, we also need to place the argument
// values in the expected registers.
if let Some(dest) = self.cur.func.dfg[inst].branch_destination() {
if self.program_ebb_arguments(inst, dest) {
if self.program_block_arguments(inst, dest) {
color_dest_args = Some(dest);
}
} else {
@@ -458,7 +462,7 @@ impl<'a> Context<'a> {
debug_assert_eq!(
self.cur.func.dfg.inst_variable_args(inst).len(),
0,
"Can't handle EBB arguments: {}",
"Can't handle block arguments: {}",
self.cur.display_inst(inst)
);
self.undivert_regs(|lr, _| !lr.is_local());
@@ -576,7 +580,7 @@ impl<'a> Context<'a> {
// If this is the first time we branch to `dest`, color its arguments to match the current
// register state.
if let Some(dest) = color_dest_args {
self.color_ebb_params(inst, dest);
self.color_block_params(inst, dest);
}
// Apply the solution to the defs.
@@ -727,7 +731,7 @@ impl<'a> Context<'a> {
// This code runs after calling `solver.inputs_done()` so we must identify
// the new variable as killed or live-through.
let layout = &self.cur.func.layout;
if self.liveness[arg_val].killed_at(inst, layout.pp_ebb(inst), layout) {
if self.liveness[arg_val].killed_at(inst, layout.pp_block(inst), layout) {
self.solver
.add_killed_var(arg_val, constraint.regclass, cur_reg);
} else {
@@ -747,12 +751,12 @@ impl<'a> Context<'a> {
///
/// 1. Any values that are live-in to `dest` must be un-diverted so they live in their globally
/// assigned register.
/// 2. If the `dest` EBB takes arguments, reassign the branch argument values to the matching
/// 2. If the `dest` block takes arguments, reassign the branch argument values to the matching
/// registers.
///
/// Returns true if this is the first time a branch to `dest` is seen, so the `dest` argument
/// values should be colored after `shuffle_inputs`.
fn program_ebb_arguments(&mut self, inst: Inst, dest: Ebb) -> bool {
fn program_block_arguments(&mut self, inst: Inst, dest: Block) -> bool {
// Find diverted registers that are live-in to `dest` and reassign them to their global
// home.
//
@@ -760,9 +764,9 @@ impl<'a> Context<'a> {
// arguments, so they can't always be un-diverted.
self.undivert_regs(|lr, layout| lr.is_livein(dest, layout));
// Now handle the EBB arguments.
// Now handle the block arguments.
let br_args = self.cur.func.dfg.inst_variable_args(inst);
let dest_args = self.cur.func.dfg.ebb_params(dest);
let dest_args = self.cur.func.dfg.block_params(dest);
debug_assert_eq!(br_args.len(), dest_args.len());
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
// The first time we encounter a branch to `dest`, we get to pick the location. The
@@ -771,7 +775,7 @@ impl<'a> Context<'a> {
ValueLoc::Unassigned => {
// This is the first branch to `dest`, so we should color `dest_arg` instead of
// `br_arg`. However, we don't know where `br_arg` will end up until
// after `shuffle_inputs`. See `color_ebb_params` below.
// after `shuffle_inputs`. See `color_block_params` below.
//
// It is possible for `dest_arg` to have no affinity, and then it should simply
// be ignored.
@@ -804,10 +808,10 @@ impl<'a> Context<'a> {
/// Knowing that we've never seen a branch to `dest` before, color its parameters to match our
/// register state.
///
/// This function is only called when `program_ebb_arguments()` returned `true`.
fn color_ebb_params(&mut self, inst: Inst, dest: Ebb) {
/// This function is only called when `program_block_arguments()` returned `true`.
fn color_block_params(&mut self, inst: Inst, dest: Block) {
let br_args = self.cur.func.dfg.inst_variable_args(inst);
let dest_args = self.cur.func.dfg.ebb_params(dest);
let dest_args = self.cur.func.dfg.block_params(dest);
debug_assert_eq!(br_args.len(), dest_args.len());
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
match self.cur.func.locations[dest_arg] {
@@ -818,7 +822,7 @@ impl<'a> Context<'a> {
}
}
ValueLoc::Reg(_) => panic!("{} arg {} already colored", dest, dest_arg),
// Spilled value consistency is verified by `program_ebb_arguments()` above.
// Spilled value consistency is verified by `program_block_arguments()` above.
ValueLoc::Stack(_) => {}
}
}
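
A toy version of this first-branch rule, with locations reduced to a bare enum (the real pass also consults affinities and handles the no-affinity case):

```rust
#[derive(Clone, Copy)]
enum Loc {
    Unassigned,
    Reg(u16),
    Stack(u32),
}

/// On the first branch to `dest`, every still-unassigned destination
/// parameter adopts the location its branch argument landed in after
/// `shuffle_inputs`.
fn color_block_params(dest_params: &mut [Loc], br_arg_locs: &[Loc]) {
    for (param, &arg) in dest_params.iter_mut().zip(br_arg_locs) {
        match *param {
            Loc::Unassigned => *param = arg,
            Loc::Reg(_) => panic!("parameter already colored"),
            Loc::Stack(_) => {} // consistency was checked when programming args
        }
    }
}
```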
@@ -1082,7 +1086,7 @@ impl<'a> Context<'a> {
/// Determine if `value` is live on a CFG edge from the current instruction.
///
/// This means that the current instruction is a branch and `value` is live in to one of the
/// branch destinations. Branch arguments and EBB parameters are not considered live on the
/// branch destinations. Branch arguments and block parameters are not considered live on the
/// edge.
fn is_live_on_outgoing_edge(&self, value: Value) -> bool {
use crate::ir::instructions::BranchInfo::*;
@@ -1091,17 +1095,17 @@ impl<'a> Context<'a> {
let layout = &self.cur.func.layout;
match self.cur.func.dfg.analyze_branch(inst) {
NotABranch => false,
SingleDest(ebb, _) => {
SingleDest(block, _) => {
let lr = &self.liveness[value];
lr.is_livein(ebb, layout)
lr.is_livein(block, layout)
}
Table(jt, ebb) => {
Table(jt, block) => {
let lr = &self.liveness[value];
!lr.is_local()
&& (ebb.map_or(false, |ebb| lr.is_livein(ebb, layout))
&& (block.map_or(false, |block| lr.is_livein(block, layout))
|| self.cur.func.jump_tables[jt]
.iter()
.any(|ebb| lr.is_livein(*ebb, layout)))
.any(|block| lr.is_livein(*block, layout)))
}
}
}
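
Stripped of the IR plumbing, the test is a disjunction over branch destinations. A sketch with a hypothetical `is_livein` oracle (the real code also skips values whose live range is purely local):

```rust
enum BranchTargets {
    NotABranch,
    SingleDest(u32),
    Table(Vec<u32>),
}

/// A value is live on an outgoing edge iff it is live-in to some destination;
/// branch arguments and block parameters do not count as live on the edge.
fn is_live_on_outgoing_edge(
    value: u32,
    branch: &BranchTargets,
    is_livein: impl Fn(u32, u32) -> bool,
) -> bool {
    match branch {
        BranchTargets::NotABranch => false,
        BranchTargets::SingleDest(dest) => is_livein(value, *dest),
        BranchTargets::Table(dests) => dests.iter().any(|&d| is_livein(value, d)),
    }
}
```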
@@ -1232,7 +1236,7 @@ impl<'a> Context<'a> {
self.liveness.create_dead(local, inst, lv.affinity);
self.liveness.extend_locally(
local,
self.cur.func.layout.pp_ebb(inst),
self.cur.func.layout.pp_block(inst),
copy,
&self.cur.func.layout,
);

View File

@@ -4,12 +4,12 @@
//! Sometimes, it is necessary to move register values to a different register in order to satisfy
//! instruction constraints.
//!
//! These register diversions are local to an EBB. No values can be diverted when entering a new
//! EBB.
//! These register diversions are local to a block. No values can be diverted when entering a new
//! block.
use crate::fx::FxHashMap;
use crate::hash_map::{Entry, Iter};
use crate::ir::{Ebb, StackSlot, Value, ValueLoc, ValueLocations};
use crate::ir::{Block, StackSlot, Value, ValueLoc, ValueLocations};
use crate::ir::{InstructionData, Opcode};
use crate::isa::{RegInfo, RegUnit};
use core::fmt;
@@ -38,22 +38,22 @@ impl Diversion {
}
}
/// Keep track of diversions in an EBB.
/// Keep track of diversions in a block.
#[derive(Clone)]
pub struct RegDiversions {
current: FxHashMap<Value, Diversion>,
}
/// Keep track of diversions at the entry of EBB.
/// Keep track of diversions at the entry of a block.
#[derive(Clone)]
struct EntryRegDiversionsValue {
key: Ebb,
key: Block,
divert: RegDiversions,
}
/// Map EBB to their matching RegDiversions at basic blocks entry.
/// Map each block to its matching RegDiversions at basic block entry.
pub struct EntryRegDiversions {
map: SparseMap<Ebb, EntryRegDiversionsValue>,
map: SparseMap<Block, EntryRegDiversionsValue>,
}
impl RegDiversions {
@@ -178,22 +178,22 @@ impl RegDiversions {
}
/// Resets the state of the current diversions to the recorded diversions at the entry of the
/// given `ebb`. The recoded diversions is available after coloring on `func.entry_diversions`
/// given `block`. The recorded diversions are available after coloring in the `func.entry_diversions`
/// field.
pub fn at_ebb(&mut self, entry_diversions: &EntryRegDiversions, ebb: Ebb) {
pub fn at_block(&mut self, entry_diversions: &EntryRegDiversions, block: Block) {
self.clear();
if let Some(entry_divert) = entry_diversions.map.get(ebb) {
if let Some(entry_divert) = entry_diversions.map.get(block) {
let iter = entry_divert.divert.current.iter();
self.current.extend(iter);
}
}
/// Copy the current state of the diversions, and save it for the entry of the `ebb` given as
/// Copy the current state of the diversions, and save it for the entry of the `block` given as
/// argument.
///
/// Note: This function can only be called once on an `ebb` with a given `entry_diversions`
/// Note: This function can only be called once on a `Block` with a given `entry_diversions`
/// argument, otherwise it would panic.
pub fn save_for_ebb(&mut self, entry_diversions: &mut EntryRegDiversions, target: Ebb) {
pub fn save_for_block(&mut self, entry_diversions: &mut EntryRegDiversions, target: Block) {
// No need to save anything if there is no diversions to be recorded.
if self.is_empty() {
return;
@@ -208,9 +208,9 @@ impl RegDiversions {
});
}
/// Check that the recorded entry for a given `ebb` matches what is recorded in the
/// Check that the recorded entry for a given `block` matches what is recorded in the
/// `entry_diversions`.
pub fn check_ebb_entry(&self, entry_diversions: &EntryRegDiversions, target: Ebb) -> bool {
pub fn check_block_entry(&self, entry_diversions: &EntryRegDiversions, target: Block) -> bool {
let entry_divert = match entry_diversions.map.get(target) {
Some(entry_divert) => entry_divert,
None => return self.is_empty(),
@@ -235,7 +235,7 @@ impl RegDiversions {
}
impl EntryRegDiversions {
/// Create a new empty entry diversion, to associate diversions to each EBB entry.
/// Create a new empty entry diversion, to associate diversions with each block entry.
pub fn new() -> Self {
Self {
map: SparseMap::new(),
@@ -259,9 +259,9 @@ impl Clone for EntryRegDiversions {
}
/// Implement `SparseMapValue`, as required to make use of a `SparseMap` for mapping the entry
/// diversions for each EBB.
impl SparseMapValue<Ebb> for EntryRegDiversionsValue {
fn key(&self) -> Ebb {
/// diversions for each block.
impl SparseMapValue<Block> for EntryRegDiversionsValue {
fn key(&self) -> Block {
self.key
}
}
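
The save/restore pair can be modeled with a plain `HashMap` standing in for `SparseMap`; values and registers are bare integers in this sketch:

```rust
use std::collections::HashMap;

#[derive(Clone, Default)]
struct Diversions(HashMap<u32, u16>); // value -> register it is diverted to

#[derive(Default)]
struct EntryDiversions(HashMap<u32, Diversions>); // block -> entry-time state

impl Diversions {
    /// Record the current diversions as the entry state of `target`.
    /// Panics if called twice for the same block, like `save_for_block`.
    fn save_for_block(&self, entry: &mut EntryDiversions, target: u32) {
        if self.0.is_empty() {
            return; // nothing to record
        }
        assert!(entry.0.insert(target, self.clone()).is_none());
    }

    /// Reset to the diversions recorded at `block`'s entry, if any.
    fn at_block(&mut self, entry: &EntryDiversions, block: u32) {
        self.0 = entry.0.get(&block).map(|d| d.0.clone()).unwrap_or_default();
    }
}
```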

View File

@@ -1,13 +1,13 @@
//! Track which values are live in an EBB with instruction granularity.
//! Track which values are live in a block with instruction granularity.
//!
//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in an EBB.
//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in a block.
//! The sets of live values are computed on the fly as the tracker is moved from instruction to
//! instruction, starting at the EBB header.
//! instruction, starting at the block header.
use crate::dominator_tree::DominatorTree;
use crate::entity::{EntityList, ListPool};
use crate::fx::FxHashMap;
use crate::ir::{DataFlowGraph, Ebb, ExpandedProgramPoint, Inst, Layout, Value};
use crate::ir::{Block, DataFlowGraph, ExpandedProgramPoint, Inst, Layout, Value};
use crate::partition_slice::partition_slice;
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liveness::Liveness;
@@ -16,13 +16,13 @@ use alloc::vec::Vec;
type ValueList = EntityList<Value>;
/// Compute and track live values throughout an EBB.
/// Compute and track live values throughout a block.
pub struct LiveValueTracker {
/// The set of values that are live at the current program point.
live: LiveValueVec,
/// Saved set of live values for every jump and branch that can potentially be an immediate
/// dominator of an EBB.
/// dominator of a block.
///
/// This is the set of values that are live *before* the branch.
idom_sets: FxHashMap<Inst, ValueList>,
@@ -37,7 +37,7 @@ pub struct LiveValue {
/// The live value.
pub value: Value,
/// The local ending point of the live range in the current EBB, as returned by
/// The local ending point of the live range in the current block, as returned by
/// `LiveRange::def_local_end()` or `LiveRange::livein_local_end()`.
pub endpoint: Inst,
@@ -47,7 +47,7 @@ pub struct LiveValue {
/// almost all users of `LiveValue` need to look at it.
pub affinity: Affinity,
/// The live range for this value never leaves its EBB.
/// The live range for this value never leaves its block.
pub is_local: bool,
/// This value is dead - the live range ends immediately.
@@ -155,75 +155,75 @@ impl LiveValueTracker {
&mut self.live.values
}
/// Move the current position to the top of `ebb`.
/// Move the current position to the top of `block`.
///
/// This depends on the stored live value set at `ebb`'s immediate dominator, so that must have
/// This depends on the stored live value set at `block`'s immediate dominator, so that must have
/// been visited first.
///
/// Returns `(liveins, args)` as a pair of slices. The first slice is the set of live-in values
/// from the immediate dominator. The second slice is the set of `ebb` parameters.
/// from the immediate dominator. The second slice is the set of `block` parameters.
///
/// Dead parameters with no uses are included in `args`. Call `drop_dead_params()` to remove them.
pub fn ebb_top(
pub fn block_top(
&mut self,
ebb: Ebb,
block: Block,
dfg: &DataFlowGraph,
liveness: &Liveness,
layout: &Layout,
domtree: &DominatorTree,
) -> (&[LiveValue], &[LiveValue]) {
// Start over, compute the set of live values at the top of the EBB from two sources:
// Start over, compute the set of live values at the top of the block from two sources:
//
// 1. Values that were live before `ebb`'s immediate dominator, filtered for those that are
// 1. Values that were live before `block`'s immediate dominator, filtered for those that are
// actually live-in.
// 2. Arguments to `ebb` that are not dead.
// 2. Arguments to `block` that are not dead.
//
self.live.clear();
// Compute the live-in values. Start by filtering the set of values that were live before
// the immediate dominator. Just use the empty set if there's no immediate dominator (i.e.,
// the entry block or an unreachable block).
if let Some(idom) = domtree.idom(ebb) {
if let Some(idom) = domtree.idom(block) {
// If the immediate dominator exists, we must have a stored list for it. This is a
// requirement to the order EBBs are visited: All dominators must have been processed
// before the current EBB.
// requirement on the order blocks are visited: All dominators must have been processed
// before the current block.
let idom_live_list = self
.idom_sets
.get(&idom)
.expect("No stored live set for dominator");
// Get just the values that are live-in to `ebb`.
// Get just the values that are live-in to `block`.
for &value in idom_live_list.as_slice(&self.idom_pool) {
let lr = liveness
.get(value)
.expect("Immediate dominator value has no live range");
// Check if this value is live-in here.
if let Some(endpoint) = lr.livein_local_end(ebb, layout) {
if let Some(endpoint) = lr.livein_local_end(block, layout) {
self.live.push(value, endpoint, lr);
}
}
}
// Now add all the live parameters to `ebb`.
// Now add all the live parameters to `block`.
let first_arg = self.live.values.len();
for &value in dfg.ebb_params(ebb) {
for &value in dfg.block_params(block) {
let lr = &liveness[value];
debug_assert_eq!(lr.def(), ebb.into());
debug_assert_eq!(lr.def(), block.into());
match lr.def_local_end().into() {
ExpandedProgramPoint::Inst(endpoint) => {
self.live.push(value, endpoint, lr);
}
ExpandedProgramPoint::Ebb(local_ebb) => {
// This is a dead EBB parameter which is not even live into the first
// instruction in the EBB.
ExpandedProgramPoint::Block(local_block) => {
// This is a dead block parameter which is not even live into the first
// instruction in the block.
debug_assert_eq!(
local_ebb, ebb,
"EBB parameter live range ends at wrong EBB header"
local_block, block,
"block parameter live range ends at wrong block header"
);
// Give this value a fake endpoint that is the first instruction in the EBB.
// Give this value a fake endpoint that is the first instruction in the block.
// We expect it to be removed by calling `drop_dead_params()`.
self.live
.push(value, layout.first_inst(ebb).expect("Empty EBB"), lr);
.push(value, layout.first_inst(block).expect("Empty block"), lr);
}
}
}
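
Stripped of the entity types, the two-source computation above amounts to a filter plus an extend. A hedged sketch, where `is_livein` stands in for `livein_local_end(...).is_some()` and values are plain u32s:

fn block_top_live(
    idom_live: &[u32],               // values live before the immediate dominator's branch
    params: &[u32],                  // this block's parameters
    is_livein: impl Fn(u32) -> bool, // does the value's live range reach this block?
) -> Vec<u32> {
    // Source 1: the dominator's live set, filtered down to actual live-ins.
    let mut live: Vec<u32> = idom_live.iter().copied().filter(|&v| is_livein(v)).collect();
    // Source 2: the block's own parameters.
    live.extend_from_slice(params);
    live
}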
@@ -274,8 +274,8 @@ impl LiveValueTracker {
ExpandedProgramPoint::Inst(endpoint) => {
self.live.push(value, endpoint, lr);
}
ExpandedProgramPoint::Ebb(ebb) => {
panic!("Instruction result live range can't end at {}", ebb);
ExpandedProgramPoint::Block(block) => {
panic!("Instruction result live range can't end at {}", block);
}
}
}
@@ -310,7 +310,7 @@ impl LiveValueTracker {
/// Drop any values that are marked as `is_dead`.
///
/// Use this after calling `ebb_top` to clean out dead EBB parameters.
/// Use this after calling `block_top` to clean out dead block parameters.
pub fn drop_dead_params(&mut self) {
self.live.remove_dead_values();
}

View File

@@ -7,18 +7,18 @@
//! # Liveness consumers
//!
//! The primary consumer of the liveness analysis is the SSA coloring pass which goes through each
//! EBB and assigns a register to the defined values. This algorithm needs to maintain a set of the
//! currently live values as it is iterating down the instructions in the EBB. It asks the
//! block and assigns a register to the defined values. This algorithm needs to maintain a set of the
//! currently live values as it is iterating down the instructions in the block. It asks the
//! following questions:
//!
//! - What is the set of live values at the entry to the EBB?
//! - When moving past a use of a value, is that value still alive in the EBB, or was that the last
//! - What is the set of live values at the entry to the block?
//! - When moving past a use of a value, is that value still alive in the block, or was that the last
//! use?
//! - When moving past a branch, which of the live values are still live below the branch?
//!
//! The set of `LiveRange` instances can answer these questions through their `def_local_end` and
//! `livein_local_end` queries. The coloring algorithm visits EBBs in a topological order of the
//! dominator tree, so it can compute the set of live values at the beginning of an EBB by starting
//! `livein_local_end` queries. The coloring algorithm visits blocks in a topological order of the
//! dominator tree, so it can compute the set of live values at the beginning of a block by starting
//! from the set of live values at the dominating branch instruction and filtering it with
//! `livein_local_end`. These sets do not need to be stored in the liveness analysis.
//!
@@ -43,7 +43,7 @@
//!
//! - Quadratic memory use. We need a bit per variable per basic block in the function.
//! - Dense representation of sparse data. In practice, the majority of SSA values never leave
//! their basic block, and those that do span basic blocks rarely span a large number of basic
//! their basic block, and those that do span basic blocks rarely span a large number of basic
//! blocks. This makes the data stored in the bitvectors quite sparse.
//! - Traditionally, the data-flow equations were solved for real program *variables* which does
//! not include temporaries used in evaluating expressions. We have an SSA form program which
@@ -141,10 +141,10 @@
//! - The first time a value is encountered, its live range is constructed as a dead live range
//! containing only the defining program point.
//! - The local interval of the value's live range is extended so it reaches the use. This may
//! require creating a new live-in local interval for the EBB.
//! - If the live range became live-in to the EBB, add the EBB to a work-list.
//! - While the work-list is non-empty pop a live-in EBB and repeat the two steps above, using each
//! of the live-in EBB's CFG predecessor instructions as a 'use'.
//! require creating a new live-in local interval for the block.
//! - If the live range became live-in to the block, add the block to a work-list.
//! - While the work-list is non-empty, pop a live-in block and repeat the two steps above, using each
//! of the live-in block's CFG predecessor instructions as a 'use'.
//!
//! The effect of this algorithm is to extend the live range of each value to reach uses as they are
//! visited. No data about each value beyond the live range is needed between visiting uses, so
@@ -176,9 +176,9 @@
//! There is some room for improvement.
use crate::entity::SparseMap;
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::dfg::ValueDef;
use crate::ir::{Ebb, Function, Inst, Layout, ProgramPoint, Value};
use crate::ir::{Block, Function, Inst, Layout, ProgramPoint, Value};
use crate::isa::{EncInfo, OperandConstraint, TargetIsa};
use crate::regalloc::affinity::Affinity;
use crate::regalloc::liverange::LiveRange;
@@ -223,14 +223,14 @@ fn get_or_create<'a>(
})
.unwrap_or_default();
}
ValueDef::Param(ebb, num) => {
def = ebb.into();
if func.layout.entry_block() == Some(ebb) {
ValueDef::Param(block, num) => {
def = block.into();
if func.layout.entry_block() == Some(block) {
// The affinity for entry block parameters can be inferred from the function
// signature.
affinity = Affinity::abi(&func.signature.params[num], isa);
} else {
// Give normal EBB parameters a register affinity matching their type.
// Give normal block parameters a register affinity matching their type.
let rc = isa.regclass_for_abi_type(func.dfg.value_type(value));
affinity = Affinity::Reg(rc.into());
}
@@ -241,43 +241,43 @@ fn get_or_create<'a>(
lrset.get_mut(value).unwrap()
}
/// Extend the live range for `value` so it reaches `to` which must live in `ebb`.
/// Extend the live range for `value` so it reaches `to` which must live in `block`.
fn extend_to_use(
lr: &mut LiveRange,
ebb: Ebb,
block: Block,
to: Inst,
worklist: &mut Vec<Ebb>,
worklist: &mut Vec<Block>,
func: &Function,
cfg: &ControlFlowGraph,
) {
// This is our scratch working space, and we'll leave it empty when we return.
debug_assert!(worklist.is_empty());
// Extend the range locally in `ebb`.
// Extend the range locally in `block`.
// If there already was a live interval in that block, we're done.
if lr.extend_in_ebb(ebb, to, &func.layout) {
worklist.push(ebb);
if lr.extend_in_block(block, to, &func.layout) {
worklist.push(block);
}
// The work list contains those EBBs where we have learned that the value needs to be
// The work list contains those blocks where we have learned that the value needs to be
// live-in.
//
// This algorithm becomes a depth-first traversal up the CFG, enumerating all paths through the
// CFG from the existing live range to `ebb`.
// CFG from the existing live range to `block`.
//
// Extend the live range as we go. The live range itself also serves as a visited set since
// `extend_in_ebb` will never return true twice for the same EBB.
// `extend_in_block` will never return true twice for the same block.
//
while let Some(livein) = worklist.pop() {
// We've learned that the value needs to be live-in to the `livein` EBB.
// We've learned that the value needs to be live-in to the `livein` block.
// Make sure it is also live at all predecessor branches to `livein`.
for BasicBlock {
ebb: pred,
for BlockPredecessor {
block: pred,
inst: branch,
} in cfg.pred_iter(livein)
{
if lr.extend_in_ebb(pred, branch, &func.layout) {
// This predecessor EBB also became live-in. We need to process it later.
if lr.extend_in_block(pred, branch, &func.layout) {
// This predecessor block also became live-in. We need to process it later.
worklist.push(pred);
}
}
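
The traversal above can be modeled without any cranelift types. In this hedged sketch, `HashSet::insert` returning true plays the role of `extend_in_block` returning true: the first time a block becomes live-in, its predecessors must also be visited.

use std::collections::{HashMap, HashSet};

/// Propagate live-in membership up the CFG from `start` (a toy model: blocks are
/// plain u32s and instruction positions within a block are ignored).
fn mark_live_in(start: u32, preds: &HashMap<u32, Vec<u32>>, live_in: &mut HashSet<u32>) {
    let mut worklist = vec![start];
    while let Some(block) = worklist.pop() {
        for &pred in preds.get(&block).into_iter().flatten() {
            if live_in.insert(pred) {
                // Newly live-in: its predecessors must be processed later.
                worklist.push(pred);
            }
        }
    }
}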
@@ -294,7 +294,7 @@ pub struct Liveness {
/// Working space for the `extend_to_use` algorithm.
/// This vector is always empty, except for inside that function.
/// It lives here to avoid repeated allocation of scratch memory.
worklist: Vec<Ebb>,
worklist: Vec<Block>,
}
impl Liveness {
@@ -342,7 +342,7 @@ impl Liveness {
/// Move the definition of `value` to `def`.
///
/// The old and new def points must be in the same EBB, and before the end of the live range.
/// The old and new def points must be in the same block, and before the end of the live range.
pub fn move_def_locally<PP>(&mut self, value: Value, def: PP)
where
PP: Into<ProgramPoint>,
@@ -353,20 +353,20 @@ impl Liveness {
/// Locally extend the live range for `value` to reach `user`.
///
/// It is assumed the `value` is already live before `user` in `ebb`.
/// It is assumed the `value` is already live before `user` in `block`.
///
/// Returns a mutable reference to the value's affinity in case that also needs to be updated.
pub fn extend_locally(
&mut self,
value: Value,
ebb: Ebb,
block: Block,
user: Inst,
layout: &Layout,
) -> &mut Affinity {
debug_assert_eq!(Some(ebb), layout.inst_ebb(user));
debug_assert_eq!(Some(block), layout.inst_block(user));
let lr = self.ranges.get_mut(value).expect("Value has no live range");
let livein = lr.extend_in_ebb(ebb, user, layout);
debug_assert!(!livein, "{} should already be live in {}", value, ebb);
let livein = lr.extend_in_block(block, user, layout);
debug_assert!(!livein, "{} should already be live in {}", value, block);
&mut lr.affinity
}
@@ -389,15 +389,15 @@ impl Liveness {
// The liveness computation needs to visit all uses, but the order doesn't matter.
// TODO: Perhaps this traversal of the function could be combined with a dead code
// elimination pass if we visit a post-order of the dominator tree?
for ebb in func.layout.ebbs() {
// Make sure we have created live ranges for dead EBB parameters.
for block in func.layout.blocks() {
// Make sure we have created live ranges for dead block parameters.
// TODO: If these parameters are really dead, we could remove them, except for the
// entry block which must match the function signature.
for &arg in func.dfg.ebb_params(ebb) {
for &arg in func.dfg.block_params(block) {
get_or_create(&mut self.ranges, arg, isa, func, &encinfo);
}
for inst in func.layout.ebb_insts(ebb) {
for inst in func.layout.block_insts(block) {
// Eliminate all value aliases, they would confuse the register allocator.
func.dfg.resolve_aliases_in_arguments(inst);
@@ -419,11 +419,11 @@ impl Liveness {
let lr = get_or_create(&mut self.ranges, arg, isa, func, &encinfo);
// Extend the live range to reach this use.
extend_to_use(lr, ebb, inst, &mut self.worklist, func, cfg);
extend_to_use(lr, block, inst, &mut self.worklist, func, cfg);
// Apply operand constraint, ignoring any variable arguments after the fixed
// operands described by `operand_constraints`. Variable arguments are either
// EBB arguments or call/return ABI arguments.
// block arguments or call/return ABI arguments.
if let Some(constraint) = operand_constraints.next() {
lr.affinity.merge(constraint, &reginfo);
}

View File

@@ -6,29 +6,29 @@
//!
//! # Local Live Ranges
//!
//! Inside a single extended basic block, the live range of a value is always an interval between
//! two program points (if the value is live in the EBB at all). The starting point is either:
//! Inside a single basic block, the live range of a value is always an interval between
//! two program points (if the value is live in the block at all). The starting point is either:
//!
//! 1. The instruction that defines the value, or
//! 2. The EBB header, because the value is an argument to the EBB, or
//! 3. The EBB header, because the value is defined in another EBB and live-in to this one.
//! 2. The block header, because the value is an argument to the block, or
//! 3. The block header, because the value is defined in another block and live-in to this one.
//!
//! The ending point of the local live range is the last of the following program points in the
//! EBB:
//! block:
//!
//! 1. The last use in the EBB, where a *use* is an instruction that has the value as an argument.
//! 2. The last branch or jump instruction in the EBB that can reach a use.
//! 1. The last use in the block, where a *use* is an instruction that has the value as an argument.
//! 2. The last branch or jump instruction in the block that can reach a use.
//! 3. If the value has no uses anywhere (a *dead value*), the program point that defines it.
//!
//! Note that 2. includes loop back-edges to the same EBB. In general, if a value is defined
//! Note that 2. includes loop back-edges to the same block. In general, if a value is defined
//! outside a loop and used inside the loop, it will be live in the entire loop.
//!
//! # Global Live Ranges
//!
//! Values that appear in more than one EBB have a *global live range* which can be seen as the
//! disjoint union of the per-EBB local intervals for all of the EBBs where the value is live.
//! Together with a `ProgramOrder` which provides a linear ordering of the EBBs, the global live
//! range becomes a linear sequence of disjoint intervals, at most one per EBB.
//! Values that appear in more than one block have a *global live range* which can be seen as the
//! disjoint union of the per-block local intervals for all of the blocks where the value is live.
//! Together with a `ProgramOrder` which provides a linear ordering of the blocks, the global live
//! range becomes a linear sequence of disjoint intervals, at most one per block.
//!
//! In the special case of a dead value, the global live range is a single interval where the start
//! and end points are the same. The global live range of a value is never completely empty.
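
The shape this description implies is roughly the following (a sketch for orientation only, with u32 stand-ins; not the crate's actual `GenericLiveRange`):

/// One live-in interval: live from the header of `begin` up to `end`.
struct IntervalSketch {
    begin: u32, // block
    end: u32,   // inst
}

/// A value's live range: one def interval plus sorted live-in intervals.
struct LiveRangeSketch {
    def_begin: u32,                // defining inst, or the block header for a parameter
    def_end: u32,                  // local end point in the defining block
    liveins: Vec<IntervalSketch>,  // empty in the common, local case
}

impl LiveRangeSketch {
    /// A local live range never leaves its defining block.
    fn is_local(&self) -> bool {
        self.liveins.is_empty()
    }
    /// A dead def's interval starts and ends at the same point.
    fn is_dead(&self) -> bool {
        self.def_begin == self.def_end
    }
}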
@@ -64,58 +64,58 @@
//! ## Current representation
//!
//! Our current implementation uses a sorted array of compressed intervals, represented by their
//! boundaries (Ebb, Inst), sorted by Ebb. This is a simple data structure, enables coalescing of
//! boundaries (Block, Inst), sorted by Block. This is a simple data structure, enables coalescing of
//! intervals easily, and shows some nice performance behavior. See
//! https://github.com/bytecodealliance/cranelift/issues/1084 for benchmarks against using a
//! bforest::Map<Ebb, Inst>.
//! bforest::Map<Block, Inst>.
//!
//! ## EBB ordering
//! ## Block ordering
//!
//! The relative order of EBBs is used to maintain a sorted list of live-in intervals and to
//! coalesce adjacent live-in intervals when the prior interval covers the whole EBB. This doesn't
//! The relative order of blocks is used to maintain a sorted list of live-in intervals and to
//! coalesce adjacent live-in intervals when the prior interval covers the whole block. This doesn't
//! depend on any property of the program order, so alternative orderings are possible:
//!
//! 1. The EBB layout order. This is what we currently use.
//! 1. The block layout order. This is what we currently use.
//! 2. A topological order of the dominator tree. All the live-in intervals would come after the
//! def interval.
//! 3. A numerical order by EBB number. Performant because it doesn't need to indirect through the
//! 3. A numerical order by block number. Performant because it doesn't need to indirect through the
//! `ProgramOrder` for comparisons.
//!
//! These orderings will cause small differences in coalescing opportunities, but all of them would
//! do a decent job of compressing a long live range. The numerical order might be preferable
//! because:
//!
//! - It has better performance because EBB numbers can be compared directly without any table
//! - It has better performance because block numbers can be compared directly without any table
//! lookups.
//! - If EBB numbers are not reused, it is safe to allocate new EBBs without getting spurious
//! live-in intervals from any coalesced representations that happen to cross a new EBB.
//! - If block numbers are not reused, it is safe to allocate new blocks without getting spurious
//! live-in intervals from any coalesced representations that happen to cross a new block.
//!
//! For comparing instructions, the layout order is always what we want.
//!
//! ## Alternative representation
//!
//! Since a local live-in interval always begins at its EBB header, it is uniquely described by its
//! end point instruction alone. We can use the layout to look up the EBB containing the end point.
//! Since a local live-in interval always begins at its block header, it is uniquely described by its
//! end point instruction alone. We can use the layout to look up the block containing the end point.
//! This means that a sorted `Vec<Inst>` would be enough to represent the set of live-in intervals.
//!
//! Coalescing is an important compression technique because some live ranges can span thousands of
//! EBBs. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where
//! an `[Ebb, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding
//! `Ebb` entry represents a single live-in interval.
//! blocks. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where
//! a `[Block, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding
//! `Block` entry represents a single live-in interval.
//!
//! This representation is more compact for a live range with many uncoalesced live-in intervals.
//! It is more complicated to work with, though, so it is probably not worth it. The performance
//! benefits of switching to a numerical EBB order only appears if the binary search is doing
//! EBB-EBB comparisons.
//! benefits of switching to a numerical block order only appear if the binary search is doing
//! block-block comparisons.
//!
//! A `BTreeMap<Ebb, Inst>` could have been used for the live-in intervals, but it doesn't provide
//! A `BTreeMap<Block, Inst>` could have been used for the live-in intervals, but it doesn't provide
//! the necessary API to make coalescing easy, nor does it optimize for our types' sizes.
//!
//! Even the specialized `bforest::Map<Ebb, Inst>` implementation is slower than a plain sorted
//! Even the specialized `bforest::Map<Block, Inst>` implementation is slower than a plain sorted
//! array, see https://github.com/bytecodealliance/cranelift/issues/1084 for details.
use crate::entity::SparseMapValue;
use crate::ir::{Ebb, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value};
use crate::ir::{Block, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value};
use crate::regalloc::affinity::Affinity;
use core::cmp::Ordering;
use core::marker::PhantomData;
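
To make the coalescing rule concrete, here is a hedged sketch under the toy ordering used by the tests further down (block headers at multiples of 10, terminators at N*10 + 1); the names are stand-ins, not the crate's API:

/// Toy `is_block_gap`: does `inst` sit immediately before the header of `block`?
fn is_block_gap(inst: u32, block: u32) -> bool {
    inst % 10 == 1 && block / 10 == inst / 10 + 1
}

/// Append a live-in interval `(begin, end)`, merging with the previous interval
/// when the previous one runs right up to `begin`.
fn push_coalesced(liveins: &mut Vec<(u32, u32)>, begin: u32, end: u32) {
    match liveins.last_mut() {
        Some(prev) if is_block_gap(prev.1, begin) => prev.1 = end,
        _ => liveins.push((begin, end)),
    }
}

With headers 20 and 30 and terminators 21 and 31, pushing (20, 21) and then (30, 31) leaves the single coalesced entry (20, 31).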
@@ -124,14 +124,14 @@ use smallvec::SmallVec;
/// Global live range of a single SSA value.
///
/// As [explained in the module documentation](index.html#local-live-ranges), the live range of an
/// SSA value is the disjoint union of a set of intervals, each local to a single EBB, and with at
/// most one interval per EBB. We further distinguish between:
/// SSA value is the disjoint union of a set of intervals, each local to a single block, and with at
/// most one interval per block. We further distinguish between:
///
/// 1. The *def interval* is the local interval in the EBB where the value is defined, and
/// 2. The *live-in intervals* are the local intervals in the remaining EBBs.
/// 1. The *def interval* is the local interval in the block where the value is defined, and
/// 2. The *live-in intervals* are the local intervals in the remaining blocks.
///
/// A live-in interval always begins at the EBB header, while the def interval can begin at the
/// defining instruction, or at the EBB header for an EBB argument value.
/// A live-in interval always begins at the block header, while the def interval can begin at the
/// defining instruction, or at the block header for a block argument value.
///
/// All values have a def interval, but a large proportion of values don't have any live-in
/// intervals. These are called *local live ranges*.
@@ -139,11 +139,11 @@ use smallvec::SmallVec;
/// # Program order requirements
///
/// The internal representation of a `LiveRange` depends on a consistent `ProgramOrder` both for
/// ordering instructions inside an EBB *and* for ordering EBBs. The methods that depend on the
/// ordering instructions inside a block *and* for ordering blocks. The methods that depend on the
/// ordering take an explicit `ProgramOrder` object, and it is the caller's responsibility to
/// ensure that the provided ordering is consistent between calls.
///
/// In particular, changing the order of EBBs or inserting new EBBs will invalidate live ranges.
/// In particular, changing the order of blocks or inserting new blocks will invalidate live ranges.
///
/// Inserting new instructions in the layout is safe, but removing instructions is not. Besides the
/// instructions using or defining their value, `LiveRange` structs can contain references to
@@ -152,7 +152,7 @@ pub type LiveRange = GenericLiveRange<Layout>;
// See comment of liveins below.
pub struct Interval {
begin: Ebb,
begin: Block,
end: Inst,
}
@@ -168,10 +168,10 @@ pub struct GenericLiveRange<PO: ProgramOrder> {
/// The preferred register allocation for this value.
pub affinity: Affinity,
/// The instruction or EBB header where this value is defined.
/// The instruction or block header where this value is defined.
def_begin: ProgramPoint,
/// The end point of the def interval. This must always belong to the same EBB as `def_begin`.
/// The end point of the def interval. This must always belong to the same block as `def_begin`.
///
/// We always have `def_begin <= def_end` with equality implying a dead def live range with no
/// uses.
@@ -179,12 +179,12 @@ pub struct GenericLiveRange<PO: ProgramOrder> {
/// Additional live-in intervals sorted in program order.
///
/// This vector is empty for most values which are only used in one EBB.
/// This vector is empty for most values which are only used in one block.
///
/// An entry `ebb -> inst` means that the live range is live-in to `ebb`, continuing up to
/// `inst` which may belong to a later EBB in the program order.
/// An entry `block -> inst` means that the live range is live-in to `block`, continuing up to
/// `inst` which may belong to a later block in the program order.
///
/// The entries are non-overlapping, and none of them overlap the EBB where the value is
/// The entries are non-overlapping, and none of them overlap the block where the value is
/// defined.
liveins: SmallVec<[Interval; 2]>,
@@ -210,7 +210,7 @@ macro_rules! cmp {
impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Create a new live range for `value` defined at `def`.
///
/// The live range will be created as dead, but it can be extended with `extend_in_ebb()`.
/// The live range will be created as dead, but it can be extended with `extend_in_block()`.
pub fn new(value: Value, def: ProgramPoint, affinity: Affinity) -> Self {
Self {
value,
@@ -222,14 +222,14 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
}
/// Finds an entry in the compressed set of live-in intervals that contains `ebb`, or return
/// Finds an entry in the compressed set of live-in intervals that contains `block`, or return
/// the position where to insert such a new entry.
fn lookup_entry_containing_ebb(&self, ebb: Ebb, order: &PO) -> Result<usize, usize> {
fn lookup_entry_containing_block(&self, block: Block, order: &PO) -> Result<usize, usize> {
self.liveins
.binary_search_by(|interval| order.cmp(interval.begin, ebb))
.binary_search_by(|interval| order.cmp(interval.begin, block))
.or_else(|n| {
// The previous interval's end might cover the searched ebb.
if n > 0 && cmp!(order, ebb <= self.liveins[n - 1].end) {
// The previous interval's end might cover the searched block.
if n > 0 && cmp!(order, block <= self.liveins[n - 1].end) {
Ok(n - 1)
} else {
Err(n)
@@ -237,23 +237,23 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
})
}
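
The same two-case lookup works on any sorted interval list. A plain-Vec sketch with u32 stand-ins for `Block` and `Inst`, assuming a simple numeric order:

/// Find the index of the interval containing `block`, or `Err` with the
/// insertion position. `intervals` is sorted by the `begin` field.
fn lookup_entry_containing(intervals: &[(u32, u32)], block: u32) -> Result<usize, usize> {
    intervals
        .binary_search_by(|&(begin, _)| begin.cmp(&block))
        .or_else(|n| {
            // A miss on `begin` may still be a hit: the previous interval's
            // `end` can extend past the searched block.
            if n > 0 && block <= intervals[n - 1].1 {
                Ok(n - 1)
            } else {
                Err(n)
            }
        })
}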
/// Extend the local interval for `ebb` so it reaches `to` which must belong to `ebb`.
/// Extend the local interval for `block` so it reaches `to` which must belong to `block`.
/// Create a live-in interval if necessary.
///
/// If the live range already has a local interval in `ebb`, extend its end point so it
/// If the live range already has a local interval in `block`, extend its end point so it
/// includes `to`, and return false.
///
/// If the live range did not previously have a local interval in `ebb`, add one so the value
/// is live-in to `ebb`, extending to `to`. Return true.
/// If the live range did not previously have a local interval in `block`, add one so the value
/// is live-in to `block`, extending to `to`. Return true.
///
/// The return value can be used to detect if we just learned that the value is live-in to
/// `ebb`. This can trigger recursive extensions in `ebb`'s CFG predecessor blocks.
pub fn extend_in_ebb(&mut self, ebb: Ebb, inst: Inst, order: &PO) -> bool {
/// `block`. This can trigger recursive extensions in `block`'s CFG predecessor blocks.
pub fn extend_in_block(&mut self, block: Block, inst: Inst, order: &PO) -> bool {
// First check if we're extending the def interval.
//
// We're assuming here that `inst` never precedes `def_begin` in the same EBB, but we can't
// check it without a method for getting `inst`'s EBB.
if cmp!(order, ebb <= self.def_end) && cmp!(order, inst >= self.def_begin) {
// We're assuming here that `inst` never precedes `def_begin` in the same block, but we can't
// check it without a method for getting `inst`'s block.
if cmp!(order, block <= self.def_end) && cmp!(order, inst >= self.def_begin) {
let inst_pp = inst.into();
debug_assert_ne!(
inst_pp, self.def_begin,
@@ -266,7 +266,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
// Now check if we're extending any of the existing live-in intervals.
match self.lookup_entry_containing_ebb(ebb, order) {
match self.lookup_entry_containing_block(block, order) {
Ok(n) => {
// We found one interval and might need to extend it.
if cmp!(order, inst <= self.liveins[n].end) {
@@ -278,7 +278,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
// coalesce the two intervals:
// [ival.begin; ival.end] + [next.begin; next.end] = [ival.begin; next.end]
if let Some(next) = &self.liveins.get(n + 1) {
if order.is_ebb_gap(inst, next.begin) {
if order.is_block_gap(inst, next.begin) {
// At this point we can choose to remove the current interval or the next
// one; remove the next one to avoid one memory move.
let next_end = next.end;
@@ -295,17 +295,17 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
Err(n) => {
// No interval was found containing the current EBB: we need to insert a new one,
// No interval was found containing the current block: we need to insert a new one,
// unless there's a coalescing opportunity with the previous or next one.
let coalesce_next = self
.liveins
.get(n)
.filter(|next| order.is_ebb_gap(inst, next.begin))
.filter(|next| order.is_block_gap(inst, next.begin))
.is_some();
let coalesce_prev = self
.liveins
.get(n.wrapping_sub(1))
.filter(|prev| order.is_ebb_gap(prev.end, ebb))
.filter(|prev| order.is_block_gap(prev.end, block))
.is_some();
match (coalesce_prev, coalesce_next) {
@@ -324,8 +324,8 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
self.liveins[n - 1].end = inst;
}
(false, true) => {
debug_assert!(cmp!(order, ebb <= self.liveins[n].begin));
self.liveins[n].begin = ebb;
debug_assert!(cmp!(order, block <= self.liveins[n].begin));
self.liveins[n].begin = block;
}
(false, false) => {
@@ -333,7 +333,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
self.liveins.insert(
n,
Interval {
begin: ebb,
begin: block,
end: inst,
},
);
@@ -355,15 +355,15 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Is this a local live range?
///
/// A local live range is only used in the same EBB where it was defined. It is allowed to span
/// multiple basic blocks within that EBB.
/// A local live range is only used in the same block where it was defined. It is allowed to span
/// multiple basic blocks within that block.
pub fn is_local(&self) -> bool {
self.liveins.is_empty()
}
/// Get the program point where this live range is defined.
///
/// This will be an EBB header when the value is an EBB argument, otherwise it is the defining
/// This will be a block header when the value is a block argument, otherwise it is the defining
/// instruction.
pub fn def(&self) -> ProgramPoint {
self.def_begin
@@ -371,33 +371,33 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
/// Move the definition of this value to a new program point.
///
/// It is only valid to move the definition within the same EBB, and it can't be moved beyond
/// It is only valid to move the definition within the same block, and it can't be moved beyond
/// `def_local_end()`.
pub fn move_def_locally(&mut self, def: ProgramPoint) {
self.def_begin = def;
}
/// Get the local end-point of this live range in the EBB where it is defined.
/// Get the local end-point of this live range in the block where it is defined.
///
/// This can be the EBB header itself in the case of a dead EBB argument.
/// This can be the block header itself in the case of a dead block argument.
/// Otherwise, it will be the last local use or branch/jump that can reach a use.
pub fn def_local_end(&self) -> ProgramPoint {
self.def_end
}
/// Get the local end-point of this live range in an EBB where it is live-in.
/// Get the local end-point of this live range in a block where it is live-in.
///
/// If this live range is not live-in to `ebb`, return `None`. Otherwise, return the end-point
/// of this live range's local interval in `ebb`.
/// If this live range is not live-in to `block`, return `None`. Otherwise, return the end-point
/// of this live range's local interval in `block`.
///
/// If the live range is live through all of `ebb`, the terminator of `ebb` is a correct
/// If the live range is live through all of `block`, the terminator of `block` is a correct
/// answer, but it is also possible that an even later program point is returned. So don't
/// depend on the returned `Inst` to belong to `ebb`.
pub fn livein_local_end(&self, ebb: Ebb, order: &PO) -> Option<Inst> {
self.lookup_entry_containing_ebb(ebb, order)
/// depend on the returned `Inst` to belong to `block`.
pub fn livein_local_end(&self, block: Block, order: &PO) -> Option<Inst> {
self.lookup_entry_containing_block(block, order)
.and_then(|i| {
let inst = self.liveins[i].end;
if cmp!(order, ebb < inst) {
if cmp!(order, block < inst) {
Ok(inst)
} else {
// Can be any error type, really, since it's discarded by ok().
@@ -407,25 +407,25 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
.ok()
}
/// Is this value live-in to `ebb`?
/// Is this value live-in to `block`?
///
/// An EBB argument is not considered to be live in.
pub fn is_livein(&self, ebb: Ebb, order: &PO) -> bool {
self.livein_local_end(ebb, order).is_some()
/// A block argument is not considered to be live in.
pub fn is_livein(&self, block: Block, order: &PO) -> bool {
self.livein_local_end(block, order).is_some()
}
/// Get all the live-in intervals.
///
/// Note that the intervals are stored in a compressed form so each entry may span multiple
/// EBBs where the value is live in.
pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Ebb, Inst)> + 'a {
/// blocks where the value is live in.
pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Block, Inst)> + 'a {
self.liveins
.iter()
.map(|interval| (interval.begin, interval.end))
}
/// Check if this live range overlaps a definition in `ebb`.
pub fn overlaps_def(&self, def: ExpandedProgramPoint, ebb: Ebb, order: &PO) -> bool {
/// Check if this live range overlaps a definition in `block`.
pub fn overlaps_def(&self, def: ExpandedProgramPoint, block: Block, order: &PO) -> bool {
// Two defs at the same program point always overlap, even if one is dead.
if def == self.def_begin.into() {
return true;
@@ -437,29 +437,29 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> {
}
// Check for an overlap with a live-in range.
match self.livein_local_end(ebb, order) {
match self.livein_local_end(block, order) {
Some(inst) => cmp!(order, def < inst),
None => false,
}
}
/// Check if this live range reaches a use at `user` in `ebb`.
pub fn reaches_use(&self, user: Inst, ebb: Ebb, order: &PO) -> bool {
/// Check if this live range reaches a use at `user` in `block`.
pub fn reaches_use(&self, user: Inst, block: Block, order: &PO) -> bool {
// Check for an overlap with the local range.
if cmp!(order, user > self.def_begin) && cmp!(order, user <= self.def_end) {
return true;
}
// Check for an overlap with a live-in range.
match self.livein_local_end(ebb, order) {
match self.livein_local_end(block, order) {
Some(inst) => cmp!(order, user <= inst),
None => false,
}
}
/// Check if this live range is killed at `user` in `ebb`.
pub fn killed_at(&self, user: Inst, ebb: Ebb, order: &PO) -> bool {
self.def_local_end() == user.into() || self.livein_local_end(ebb, order) == Some(user)
/// Check if this live range is killed at `user` in `block`.
pub fn killed_at(&self, user: Inst, block: Block, order: &PO) -> bool {
self.def_local_end() == user.into() || self.livein_local_end(block, order) == Some(user)
}
}
@@ -474,15 +474,15 @@ impl<PO: ProgramOrder> SparseMapValue<Value> for GenericLiveRange<PO> {
mod tests {
use super::{GenericLiveRange, Interval};
use crate::entity::EntityRef;
use crate::ir::{Ebb, Inst, Value};
use crate::ir::{Block, Inst, Value};
use crate::ir::{ExpandedProgramPoint, ProgramOrder};
use alloc::vec::Vec;
use core::cmp::Ordering;
// Dummy program order which simply compares indexes.
// It is assumed that EBBs have indexes that are multiples of 10, and instructions have indexes
// in between. `is_ebb_gap` assumes that terminator instructions have indexes of the form
// ebb * 10 + 1. This is used in the coalesce test.
// It is assumed that blocks have indexes that are multiples of 10, and instructions have indexes
// in between. `is_block_gap` assumes that terminator instructions have indexes of the form
// block * 10 + 1. This is used in the coalesce test.
struct ProgOrder {}
impl ProgramOrder for ProgOrder {
@@ -494,7 +494,7 @@ mod tests {
fn idx(pp: ExpandedProgramPoint) -> usize {
match pp {
ExpandedProgramPoint::Inst(i) => i.index(),
ExpandedProgramPoint::Ebb(e) => e.index(),
ExpandedProgramPoint::Block(e) => e.index(),
}
}
@@ -503,31 +503,31 @@ mod tests {
ia.cmp(&ib)
}
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool {
inst.index() % 10 == 1 && ebb.index() / 10 == inst.index() / 10 + 1
fn is_block_gap(&self, inst: Inst, block: Block) -> bool {
inst.index() % 10 == 1 && block.index() / 10 == inst.index() / 10 + 1
}
}
impl ProgOrder {
// Get the EBB corresponding to `inst`.
fn inst_ebb(&self, inst: Inst) -> Ebb {
// Get the block corresponding to `inst`.
fn inst_block(&self, inst: Inst) -> Block {
let i = inst.index();
Ebb::new(i - i % 10)
Block::new(i - i % 10)
}
// Get the EBB of a program point.
fn pp_ebb<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Ebb {
// Get the block of a program point.
fn pp_block<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Block {
match pp.into() {
ExpandedProgramPoint::Inst(i) => self.inst_ebb(i),
ExpandedProgramPoint::Ebb(e) => e,
ExpandedProgramPoint::Inst(i) => self.inst_block(i),
ExpandedProgramPoint::Block(e) => e,
}
}
// Validate the live range invariants.
fn validate(&self, lr: &GenericLiveRange<Self>) {
// The def interval must cover a single EBB.
let def_ebb = self.pp_ebb(lr.def_begin);
assert_eq!(def_ebb, self.pp_ebb(lr.def_end));
// The def interval must cover a single block.
let def_block = self.pp_block(lr.def_begin);
assert_eq!(def_block, self.pp_block(lr.def_end));
// Check that the def interval isn't backwards.
match self.cmp(lr.def_begin, lr.def_end) {
@@ -552,7 +552,7 @@ mod tests {
assert!(
self.cmp(lr.def_end, begin) == Ordering::Less
|| self.cmp(lr.def_begin, end) == Ordering::Greater,
"Interval can't overlap the def EBB"
"Interval can't overlap the def block"
);
// Save for next round.
@@ -567,10 +567,10 @@ mod tests {
#[test]
fn dead_def_range() {
let v0 = Value::new(0);
let e0 = Ebb::new(0);
let e0 = Block::new(0);
let i1 = Inst::new(1);
let i2 = Inst::new(2);
let e2 = Ebb::new(2);
let e2 = Block::new(2);
let lr = GenericLiveRange::new(v0, i1.into(), Default::default());
assert!(lr.is_dead());
assert!(lr.is_local());
@@ -588,13 +588,13 @@ mod tests {
#[test]
fn dead_arg_range() {
let v0 = Value::new(0);
let e2 = Ebb::new(2);
let e2 = Block::new(2);
let lr = GenericLiveRange::new(v0, e2.into(), Default::default());
assert!(lr.is_dead());
assert!(lr.is_local());
assert_eq!(lr.def(), e2.into());
assert_eq!(lr.def_local_end(), e2.into());
// The def interval of an EBB argument does not count as live-in.
// The def interval of a block argument does not count as live-in.
assert_eq!(lr.livein_local_end(e2, PO), None);
PO.validate(&lr);
}
@@ -602,13 +602,13 @@ mod tests {
#[test]
fn local_def() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let i13 = Inst::new(13);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e10, i13, PO), false);
assert_eq!(lr.extend_in_block(e10, i13, PO), false);
PO.validate(&lr);
assert!(!lr.is_dead());
assert!(lr.is_local());
@@ -616,7 +616,7 @@ mod tests {
assert_eq!(lr.def_local_end(), i13.into());
// Extending to an already covered inst should not change anything.
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), i11.into());
assert_eq!(lr.def_local_end(), i13.into());
@@ -625,15 +625,15 @@ mod tests {
#[test]
fn local_arg() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let i13 = Inst::new(13);
let mut lr = GenericLiveRange::new(v0, e10.into(), Default::default());
// Extending a dead EBB argument in its own block should not indicate that a live-in
// Extending a dead block argument in its own block should not indicate that a live-in
// interval was created.
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
PO.validate(&lr);
assert!(!lr.is_dead());
assert!(lr.is_local());
@@ -641,13 +641,13 @@ mod tests {
assert_eq!(lr.def_local_end(), i12.into());
// Extending to an already covered inst should not change anything.
assert_eq!(lr.extend_in_ebb(e10, i11, PO), false);
assert_eq!(lr.extend_in_block(e10, i11, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), e10.into());
assert_eq!(lr.def_local_end(), i12.into());
// Extending further.
assert_eq!(lr.extend_in_ebb(e10, i13, PO), false);
assert_eq!(lr.extend_in_block(e10, i13, PO), false);
PO.validate(&lr);
assert_eq!(lr.def(), e10.into());
assert_eq!(lr.def_local_end(), i13.into());
@@ -656,28 +656,28 @@ mod tests {
#[test]
fn global_def() {
let v0 = Value::new(0);
let e10 = Ebb::new(10);
let e10 = Block::new(10);
let i11 = Inst::new(11);
let i12 = Inst::new(12);
let e20 = Ebb::new(20);
let e20 = Block::new(20);
let i21 = Inst::new(21);
let i22 = Inst::new(22);
let i23 = Inst::new(23);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e10, i12, PO), false);
assert_eq!(lr.extend_in_block(e10, i12, PO), false);
// Adding a live-in interval.
assert_eq!(lr.extend_in_ebb(e20, i22, PO), true);
assert_eq!(lr.extend_in_block(e20, i22, PO), true);
PO.validate(&lr);
assert_eq!(lr.livein_local_end(e20, PO), Some(i22));
// Non-extending the live-in.
assert_eq!(lr.extend_in_ebb(e20, i21, PO), false);
assert_eq!(lr.extend_in_block(e20, i21, PO), false);
assert_eq!(lr.livein_local_end(e20, PO), Some(i22));
// Extending the existing live-in.
assert_eq!(lr.extend_in_ebb(e20, i23, PO), false);
assert_eq!(lr.extend_in_block(e20, i23, PO), false);
PO.validate(&lr);
assert_eq!(lr.livein_local_end(e20, PO), Some(i23));
}
@@ -686,35 +686,35 @@ mod tests {
fn coalesce() {
let v0 = Value::new(0);
let i11 = Inst::new(11);
let e20 = Ebb::new(20);
let e20 = Block::new(20);
let i21 = Inst::new(21);
let e30 = Ebb::new(30);
let e30 = Block::new(30);
let i31 = Inst::new(31);
let e40 = Ebb::new(40);
let e40 = Block::new(40);
let i41 = Inst::new(41);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true);
assert_eq!(lr.extend_in_block(e30, i31, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i31)]);
// Coalesce to previous
assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true);
assert_eq!(lr.extend_in_block(e40, i41, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i41)]);
// Coalesce to next
assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true);
assert_eq!(lr.extend_in_block(e20, i21, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]);
let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default());
assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true);
assert_eq!(lr.extend_in_block(e40, i41, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e40, i41)]);
assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true);
assert_eq!(lr.extend_in_block(e20, i21, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i21), (e40, i41)]);
// Coalesce to previous and next
assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true);
assert_eq!(lr.extend_in_block(e30, i31, PO,), true);
assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]);
}
}

View File

@@ -13,7 +13,7 @@ use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::entity::{SparseMap, SparseMapValue};
use crate::ir::{AbiParam, ArgumentLoc, InstBuilder};
use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueLoc};
use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueLoc};
use crate::isa::RegClass;
use crate::isa::{ConstraintKind, EncInfo, Encoding, RecipeConstraints, TargetIsa};
use crate::regalloc::affinity::Affinity;
@@ -113,24 +113,24 @@ impl SparseMapValue<Value> for ReloadedValue {
impl<'a> Context<'a> {
fn run(&mut self, tracker: &mut LiveValueTracker) {
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_ebb(ebb, tracker);
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_block(block, tracker);
}
}
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Reloading {}:", ebb);
self.visit_ebb_header(ebb, tracker);
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Reloading {}:", block);
self.visit_block_header(block, tracker);
tracker.drop_dead_params();
// visit_ebb_header() places us at the first interesting instruction in the EBB.
// visit_block_header() places us at the first interesting instruction in the block.
while let Some(inst) = self.cur.current_inst() {
if !self.cur.func.dfg[inst].opcode().is_ghost() {
// This instruction either has an encoding or has ABI constraints, so visit it to
// insert spills and fills as needed.
let encoding = self.cur.func.encodings[inst];
self.visit_inst(ebb, inst, encoding, tracker);
self.visit_inst(block, inst, encoding, tracker);
tracker.drop_dead(inst);
} else {
// This is a ghost instruction with no encoding and no extra constraints, so we can
@@ -140,29 +140,29 @@ impl<'a> Context<'a> {
}
}
/// Process the EBB parameters. Move to the next instruction in the EBB to be processed
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.ebb_top(
ebb,
/// Process the block parameters. Move to the next instruction in the block to be processed
fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree,
);
if self.cur.func.layout.entry_block() == Some(ebb) {
if self.cur.func.layout.entry_block() == Some(block) {
debug_assert_eq!(liveins.len(), 0);
self.visit_entry_params(ebb, args);
self.visit_entry_params(block, args);
} else {
self.visit_ebb_params(ebb, args);
self.visit_block_params(block, args);
}
}
/// Visit the parameters on the entry block.
/// These values have ABI constraints from the function signature.
fn visit_entry_params(&mut self, ebb: Ebb, args: &[LiveValue]) {
fn visit_entry_params(&mut self, block: Block, args: &[LiveValue]) {
debug_assert_eq!(self.cur.func.signature.params.len(), args.len());
self.cur.goto_first_inst(ebb);
self.cur.goto_first_inst(block);
for (arg_idx, arg) in args.iter().enumerate() {
let abi = self.cur.func.signature.params[arg_idx];
@@ -175,10 +175,10 @@ impl<'a> Context<'a> {
.cur
.func
.dfg
.replace_ebb_param(arg.value, abi.value_type);
.replace_block_param(arg.value, abi.value_type);
let affinity = Affinity::abi(&abi, self.cur.isa);
self.liveness.create_dead(reg, ebb, affinity);
self.insert_spill(ebb, arg.value, reg);
self.liveness.create_dead(reg, block, affinity);
self.insert_spill(block, arg.value, reg);
}
}
ArgumentLoc::Stack(_) => {
@@ -189,15 +189,15 @@ impl<'a> Context<'a> {
}
}
fn visit_ebb_params(&mut self, ebb: Ebb, _args: &[LiveValue]) {
self.cur.goto_first_inst(ebb);
fn visit_block_params(&mut self, block: Block, _args: &[LiveValue]) {
self.cur.goto_first_inst(block);
}
/// Process the instruction pointed to by `pos`, and advance the cursor to the next instruction
/// that needs processing.
fn visit_inst(
&mut self,
ebb: Ebb,
block: Block,
inst: Inst,
encoding: Encoding,
tracker: &mut LiveValueTracker,
@@ -265,7 +265,7 @@ impl<'a> Context<'a> {
{
self.reload_copy_candidates(inst);
} else {
self.reload_inst_candidates(ebb, inst);
self.reload_inst_candidates(block, inst);
}
// TODO: Reuse reloads for future instructions.
@@ -304,7 +304,7 @@ impl<'a> Context<'a> {
let value_type = self.cur.func.dfg.value_type(lv.value);
let reg = self.cur.func.dfg.replace_result(lv.value, value_type);
self.liveness.create_dead(reg, inst, Affinity::new(op));
self.insert_spill(ebb, lv.value, reg);
self.insert_spill(block, lv.value, reg);
}
}
}
@@ -333,14 +333,14 @@ impl<'a> Context<'a> {
let reg = self.cur.func.dfg.replace_result(lv.value, abi.value_type);
self.liveness
.create_dead(reg, inst, Affinity::abi(&abi, self.cur.isa));
self.insert_spill(ebb, lv.value, reg);
self.insert_spill(block, lv.value, reg);
}
}
}
}
// Reload the current candidates for the given `inst`.
fn reload_inst_candidates(&mut self, ebb: Ebb, inst: Inst) {
fn reload_inst_candidates(&mut self, block: Block, inst: Inst) {
// Insert fill instructions before `inst` and replace `cand.value` with the filled value.
for cand in self.candidates.iter_mut() {
if let Some(reload) = self.reloads.get(cand.value) {
@@ -361,15 +361,15 @@ impl<'a> Context<'a> {
let affinity = Affinity::Reg(cand.regclass.into());
self.liveness.create_dead(reg, fill, affinity);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
.extend_locally(reg, block, inst, &self.cur.func.layout);
}
// Rewrite instruction arguments.
//
// Only rewrite those arguments that were identified as candidates. This leaves EBB
// arguments on branches as-is without rewriting them. A spilled EBB argument needs to stay
// spilled because the matching EBB parameter is going to be in the same virtual register
// and therefore the same stack slot as the EBB argument value.
// Only rewrite those arguments that were identified as candidates. This leaves block
// arguments on branches as-is without rewriting them. A spilled block argument needs to stay
// spilled because the matching block parameter is going to be in the same virtual register
// and therefore the same stack slot as the block argument value.
if !self.candidates.is_empty() {
let args = self.cur.func.dfg.inst_args_mut(inst);
while let Some(cand) = self.candidates.pop() {
@@ -448,14 +448,14 @@ impl<'a> Context<'a> {
/// - Insert `stack = spill reg` at `pos`, and assign an encoding.
/// - Move the `stack` live range starting point to the new instruction.
/// - Extend the `reg` live range to reach the new instruction.
fn insert_spill(&mut self, ebb: Ebb, stack: Value, reg: Value) {
fn insert_spill(&mut self, block: Block, stack: Value, reg: Value) {
self.cur.ins().with_result(stack).spill(reg);
let inst = self.cur.built_inst();
// Update live ranges.
self.liveness.move_def_locally(stack, inst);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
.extend_locally(reg, block, inst, &self.cur.func.layout);
}
}

View File

@@ -32,7 +32,7 @@ fn insert_and_encode_safepoint<'f>(
}
// The emit_stackmaps() function analyzes each instruction to retrieve the liveness of
// the defs and operands by traversing a function's ebbs in layout order.
// the defs and operands by traversing a function's blocks in layout order.
pub fn emit_stackmaps(
func: &mut Function,
domtree: &DominatorTree,
@@ -42,13 +42,13 @@ pub fn emit_stackmaps(
) {
let mut curr = func.layout.entry_block();
while let Some(ebb) = curr {
tracker.ebb_top(ebb, &func.dfg, liveness, &func.layout, domtree);
while let Some(block) = curr {
tracker.block_top(block, &func.dfg, liveness, &func.layout, domtree);
tracker.drop_dead_params();
let mut pos = FuncCursor::new(func);
// From the top of the ebb, step through the instructions.
pos.goto_top(ebb);
// From the top of the block, step through the instructions.
pos.goto_top(block);
while let Some(inst) = pos.next_inst() {
if let InstructionData::Trap {
@@ -67,6 +67,6 @@ pub fn emit_stackmaps(
tracker.process_inst(inst, &pos.func.dfg, liveness);
tracker.drop_dead(inst);
}
curr = func.layout.next_ebb(ebb);
curr = func.layout.next_block(block);
}
}

View File

@@ -34,20 +34,20 @@
//! # Register diversions and global interference
//!
//! We can divert register values temporarily to satisfy constraints, but we need to put the
//! values back into their originally assigned register locations before leaving the EBB.
//! Otherwise, values won't be in the right register at the entry point of other EBBs.
//! values back into their originally assigned register locations before leaving the block.
//! Otherwise, values won't be in the right register at the entry point of other blocks.
//!
//! Some values are *local*, and we don't need to worry about putting those values back since they
//! are not used in any other EBBs.
//! are not used in any other blocks.
//!
//! When we assign register locations to defines, we are assigning both the register used locally
//! immediately after the instruction and the register used globally when the defined value is used
//! in a different EBB. We need to avoid interference both locally at the instruction and globally.
//! in a different block. We need to avoid interference both locally at the instruction and globally.
//!
//! We have multiple mappings of values to registers:
//!
//! 1. The initial local mapping before the instruction. This includes any diversions from previous
//! instructions in the EBB, but not diversions for the current instruction.
//! instructions in the block, but not diversions for the current instruction.
//! 2. The local mapping after applying the additional reassignments required to satisfy the
//! constraints of the current instruction.
//! 3. The local mapping after the instruction. This excludes values killed by the instruction and

View File

@@ -17,7 +17,7 @@
use crate::cursor::{Cursor, EncCursor};
use crate::dominator_tree::DominatorTree;
use crate::ir::{ArgumentLoc, Ebb, Function, Inst, InstBuilder, SigRef, Value, ValueLoc};
use crate::ir::{ArgumentLoc, Block, Function, Inst, InstBuilder, SigRef, Value, ValueLoc};
use crate::isa::registers::{RegClass, RegClassIndex, RegClassMask, RegUnit};
use crate::isa::{ConstraintKind, EncInfo, RecipeConstraints, RegInfo, TargetIsa};
use crate::regalloc::affinity::Affinity;
@@ -121,22 +121,22 @@ impl Spilling {
impl<'a> Context<'a> {
fn run(&mut self, tracker: &mut LiveValueTracker) {
self.topo.reset(self.cur.func.layout.ebbs());
while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_ebb(ebb, tracker);
self.topo.reset(self.cur.func.layout.blocks());
while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) {
self.visit_block(block, tracker);
}
}
fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
debug!("Spilling {}:", ebb);
self.cur.goto_top(ebb);
self.visit_ebb_header(ebb, tracker);
fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
debug!("Spilling {}:", block);
self.cur.goto_top(block);
self.visit_block_header(block, tracker);
tracker.drop_dead_params();
self.process_spills(tracker);
while let Some(inst) = self.cur.next_inst() {
if !self.cur.func.dfg[inst].opcode().is_ghost() {
self.visit_inst(inst, ebb, tracker);
self.visit_inst(inst, block, tracker);
} else {
let (_throughs, kills) = tracker.process_ghost(inst);
self.free_regs(kills);
@@ -185,9 +185,9 @@ impl<'a> Context<'a> {
}
}
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, params) = tracker.ebb_top(
ebb,
fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) {
let (liveins, params) = tracker.block_top(
block,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
@@ -199,26 +199,26 @@ impl<'a> Context<'a> {
self.pressure.reset();
self.take_live_regs(liveins);
// An EBB can have an arbitrary (up to 2^16...) number of parameters, so they are not
// A block can have an arbitrary (up to 2^16...) number of parameters, so they are not
// guaranteed to fit in registers.
for lv in params {
if let Affinity::Reg(rci) = lv.affinity {
let rc = self.reginfo.rc(rci);
'try_take: while let Err(mask) = self.pressure.take_transient(rc) {
debug!("Need {} reg for EBB param {}", rc, lv.value);
debug!("Need {} reg for block param {}", rc, lv.value);
match self.spill_candidate(mask, liveins) {
Some(cand) => {
debug!(
"Spilling live-in {} to make room for {} EBB param {}",
"Spilling live-in {} to make room for {} block param {}",
cand, rc, lv.value
);
self.spill_reg(cand);
}
None => {
// We can't spill any of the live-in registers, so we have to spill an
// EBB argument. Since the current spill metric would consider all the
// EBB arguments equal, just spill the present register.
debug!("Spilling {} EBB argument {}", rc, lv.value);
// block argument. Since the current spill metric would consider all the
// block arguments equal, just spill the present register.
debug!("Spilling {} block argument {}", rc, lv.value);
// Since `spill_reg` will free a register, add the current one here.
self.pressure.take(rc);
@@ -230,15 +230,15 @@ impl<'a> Context<'a> {
}
}
// The transient pressure counts for the EBB arguments are accurate. Just preserve them.
// The transient pressure counts for the block arguments are accurate. Just preserve them.
self.pressure.preserve_transient();
self.free_dead_regs(params);
}
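The parameter-handling loop above amounts to: try to take a register, and keep spilling a live-in candidate until one is available. A schematic stand-alone version, assuming a single register class with fixed capacity and a naive first-live-in spill heuristic (names are illustrative, not Cranelift's API):

```rust
fn take_reg_for_param(
    in_regs: &mut Vec<u32>, // live-in values currently holding registers
    capacity: usize,
    param: u32,
) -> Result<(), String> {
    while in_regs.len() >= capacity {
        // `spill_candidate`: any live-in not used by the current instruction.
        match in_regs.first().copied() {
            Some(cand) => {
                println!("spilling live-in v{} to make room for param v{}", cand, param);
                in_regs.retain(|&v| v != cand); // `spill_reg` frees its register
            }
            None => return Err(format!("no spill candidate for v{}", param)),
        }
    }
    in_regs.push(param);
    Ok(())
}

fn main() {
    let mut in_regs = vec![1, 2, 3];
    take_reg_for_param(&mut in_regs, 3, 7).unwrap();
    assert_eq!(in_regs, vec![2, 3, 7]);
}
```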
fn visit_inst(&mut self, inst: Inst, ebb: Ebb, tracker: &mut LiveValueTracker) {
fn visit_inst(&mut self, inst: Inst, block: Block, tracker: &mut LiveValueTracker) {
debug!("Inst {}, {}", self.cur.display_inst(inst), self.pressure);
debug_assert_eq!(self.cur.current_inst(), Some(inst));
debug_assert_eq!(self.cur.current_ebb(), Some(ebb));
debug_assert_eq!(self.cur.current_block(), Some(block));
let constraints = self
.encinfo
@@ -246,7 +246,7 @@ impl<'a> Context<'a> {
// We may need to resolve register constraints if there are any noteworthy uses.
debug_assert!(self.reg_uses.is_empty());
self.collect_reg_uses(inst, ebb, constraints);
self.collect_reg_uses(inst, block, constraints);
// Calls usually have fixed register uses.
let call_sig = self.cur.func.dfg.call_signature(inst);
@@ -313,7 +313,12 @@ impl<'a> Context<'a> {
// We are assuming here that if a value is used both by a fixed register operand and a register
// class operand, they two are compatible. We are also assuming that two register class
// operands are always compatible.
fn collect_reg_uses(&mut self, inst: Inst, ebb: Ebb, constraints: Option<&RecipeConstraints>) {
fn collect_reg_uses(
&mut self,
inst: Inst,
block: Block,
constraints: Option<&RecipeConstraints>,
) {
let args = self.cur.func.dfg.inst_args(inst);
let num_fixed_ins = if let Some(constraints) = constraints {
for (idx, (op, &arg)) in constraints.ins.iter().zip(args).enumerate() {
@@ -324,11 +329,11 @@ impl<'a> Context<'a> {
ConstraintKind::FixedReg(_) => reguse.fixed = true,
ConstraintKind::Tied(_) => {
// A tied operand must kill the used value.
reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout);
reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout);
}
ConstraintKind::FixedTied(_) => {
reguse.fixed = true;
reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout);
reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout);
}
ConstraintKind::Reg => {}
}
@@ -450,10 +455,10 @@ impl<'a> Context<'a> {
// Spill a live register that is *not* used by the current instruction.
// Spilling a use wouldn't help.
//
// Do allow spilling of EBB arguments on branches. This is safe since we spill
// the whole virtual register which includes the matching EBB parameter value
// Do allow spilling of block arguments on branches. This is safe since we spill
// the whole virtual register which includes the matching block parameter value
// at the branch destination. It is also necessary since there can be
// arbitrarily many EBB arguments.
// arbitrarily many block arguments.
match {
let args = if self.cur.func.dfg[inst].opcode().is_branch() {
self.cur.func.dfg.inst_fixed_args(inst)
@@ -572,7 +577,7 @@ impl<'a> Context<'a> {
self.liveness.create_dead(copy, inst, Affinity::Reg(rci));
self.liveness.extend_locally(
copy,
self.cur.func.layout.pp_ebb(inst),
self.cur.func.layout.pp_block(inst),
self.cur.current_inst().expect("must be at an instruction"),
&self.cur.func.layout,
);

View File

@@ -5,11 +5,11 @@
//! output.
//!
//! A virtual register is typically built by merging together SSA values that are "phi-related" -
//! that is, one value is passed as an EBB argument to a branch and the other is the EBB parameter
//! that is, one value is passed as a block argument to a branch and the other is the block parameter
//! value itself.
//!
//! If any values in a virtual register are spilled, they will use the same stack slot. This avoids
//! memory-to-memory copies when a spilled value is passed as an EBB argument.
//! memory-to-memory copies when a spilled value is passed as a block argument.
use crate::dbg::DisplayList;
use crate::dominator_tree::DominatorTreePreorder;

View File

@@ -59,7 +59,7 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) {
let _tt = timing::gvn();
debug_assert!(domtree.is_valid());
// Visit EBBs in a reverse post-order.
// Visit blocks in a reverse post-order.
//
// The RefCell here is a bit ugly since the HashKeys in the ScopedHashMap
// need a reference to the function.
@@ -68,13 +68,13 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) {
let mut visible_values: ScopedHashMap<HashKey, Inst> = ScopedHashMap::new();
let mut scope_stack: Vec<Inst> = Vec::new();
for &ebb in domtree.cfg_postorder().iter().rev() {
for &block in domtree.cfg_postorder().iter().rev() {
{
// Pop any scopes that we just exited.
let layout = &pos.borrow().func.layout;
loop {
if let Some(current) = scope_stack.last() {
if domtree.dominates(*current, ebb, layout) {
if domtree.dominates(*current, block, layout) {
break;
}
} else {
@@ -85,11 +85,11 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) {
}
// Push a scope for the current block.
scope_stack.push(layout.first_inst(ebb).unwrap());
scope_stack.push(layout.first_inst(block).unwrap());
visible_values.increment_depth();
}
pos.borrow_mut().goto_top(ebb);
pos.borrow_mut().goto_top(block);
while let Some(inst) = {
let mut pos = pos.borrow_mut();
pos.next_inst()

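The scoped map driving this pass can be pictured as a stack of hash maps, one scope per dominator-tree level, so a lookup can only hit values computed in dominating blocks. A toy sketch with invented types (keys are `(opcode, args)` pairs, values are instruction ids):

```rust
use std::collections::HashMap;

type Key = (&'static str, Vec<u32>);

struct ScopedMap {
    scopes: Vec<HashMap<Key, u32>>,
}

impl ScopedMap {
    fn push(&mut self) { self.scopes.push(HashMap::new()); }
    fn pop(&mut self) { self.scopes.pop(); }
    // Search innermost-out, so only dominating definitions are visible.
    fn lookup(&self, k: &Key) -> Option<u32> {
        self.scopes.iter().rev().find_map(|s| s.get(k).copied())
    }
    fn insert(&mut self, k: Key, v: u32) {
        self.scopes.last_mut().expect("scope").insert(k, v);
    }
}

fn main() {
    let mut m = ScopedMap { scopes: vec![] };
    m.push(); // entry block scope
    m.insert(("iadd", vec![1, 2]), 10);
    m.push(); // a dominated block sees the entry's values...
    assert_eq!(m.lookup(&("iadd", vec![1, 2])), Some(10));
    m.pop(); // ...but sibling blocks do not see values defined in it
}
```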
View File

@@ -14,7 +14,7 @@ use crate::ir::{
immediates,
instructions::{Opcode, ValueList},
types::{I16, I32, I64, I8},
DataFlowGraph, Ebb, Function, Inst, InstBuilder, InstructionData, Type, Value,
Block, DataFlowGraph, Function, Inst, InstBuilder, InstructionData, Type, Value,
};
use crate::isa::TargetIsa;
use crate::timing;
@@ -810,10 +810,10 @@ enum BranchOrderKind {
/// Reorder branches to encourage fallthroughs.
///
/// When an ebb ends with a conditional branch followed by an unconditional
/// branch, this will reorder them if one of them is branching to the next Ebb
/// When a block ends with a conditional branch followed by an unconditional
/// branch, this will reorder them if one of them is branching to the next Block
/// layout-wise. The unconditional jump can then become a fallthrough.
fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst: Inst) {
fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, block: Block, inst: Inst) {
let (term_inst, term_inst_args, term_dest, cond_inst, cond_inst_args, cond_dest, kind) =
match pos.func.dfg[inst] {
InstructionData::Jump {
@@ -821,13 +821,13 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst
destination,
ref args,
} => {
let next_ebb = if let Some(next_ebb) = pos.func.layout.next_ebb(ebb) {
next_ebb
let next_block = if let Some(next_block) = pos.func.layout.next_block(block) {
next_block
} else {
return;
};
if destination == next_ebb {
if destination == next_block {
return;
}
@@ -840,7 +840,7 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst
let prev_inst_data = &pos.func.dfg[prev_inst];
if let Some(prev_dest) = prev_inst_data.branch_destination() {
if prev_dest != next_ebb {
if prev_dest != next_block {
return;
}
} else {
@@ -941,7 +941,7 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst
}
}
cfg.recompute_ebb(pos.func, ebb);
cfg.recompute_block(pos.func, block);
}
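In miniature, the reordering is: if the conditional branch targets the next block in layout while the terminating jump does not, invert the condition and swap destinations so the jump becomes a fallthrough. A hedged sketch with made-up types rather than Cranelift's `InstructionData`:

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum Cond { Zero, NonZero }

fn reorder(cond: &mut Cond, cond_dest: &mut u32, jump_dest: &mut u32, next_block: u32) {
    if *cond_dest == next_block && *jump_dest != next_block {
        // Invert the condition and swap targets.
        *cond = match *cond { Cond::Zero => Cond::NonZero, Cond::NonZero => Cond::Zero };
        std::mem::swap(cond_dest, jump_dest);
    }
}

fn main() {
    // brz v0, block1; jump block5 -- with block1 next in layout.
    let (mut c, mut cd, mut jd) = (Cond::Zero, 1, 5);
    reorder(&mut c, &mut cd, &mut jd, 1);
    // Now: brnz v0, block5; jump block1 (a fallthrough).
    assert_eq!((c, cd, jd), (Cond::NonZero, 5, 1));
}
```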
/// The main pre-opt pass.
@@ -949,7 +949,7 @@ pub fn do_preopt(func: &mut Function, cfg: &mut ControlFlowGraph, isa: &dyn Targ
let _tt = timing::preopt();
let mut pos = FuncCursor::new(func);
let native_word_width = isa.pointer_bytes();
while let Some(ebb) = pos.next_ebb() {
while let Some(block) = pos.next_block() {
while let Some(inst) = pos.next_inst() {
// Apply basic simplifications.
simplify(&mut pos, inst, native_word_width as u32);
@@ -961,7 +961,7 @@ pub fn do_preopt(func: &mut Function, cfg: &mut ControlFlowGraph, isa: &dyn Targ
}
branch_opt(&mut pos, inst);
branch_order(&mut pos, cfg, ebb, inst);
branch_order(&mut pos, cfg, block, inst);
}
}
}

View File

@@ -1,28 +1,28 @@
//! Topological order of EBBs, according to the dominator tree.
//! Topological order of blocks, according to the dominator tree.
use crate::dominator_tree::DominatorTree;
use crate::entity::EntitySet;
use crate::ir::{Ebb, Layout};
use crate::ir::{Block, Layout};
use alloc::vec::Vec;
/// Present EBBs in a topological order such that all dominating EBBs are guaranteed to be visited
/// before the current EBB.
/// Present blocks in a topological order such that all dominating blocks are guaranteed to be visited
/// before the current block.
///
/// There are many topological orders of the EBBs in a function, so it is possible to provide a
/// preferred order, and the `TopoOrder` will present EBBs in an order that is as close as possible
/// There are many topological orders of the blocks in a function, so it is possible to provide a
/// preferred order, and the `TopoOrder` will present blocks in an order that is as close as possible
/// to the preferred order.
pub struct TopoOrder {
/// Preferred order of EBBs to visit.
preferred: Vec<Ebb>,
/// Preferred order of blocks to visit.
preferred: Vec<Block>,
/// Next entry to get from `preferred`.
next: usize,
/// Set of visited EBBs.
visited: EntitySet<Ebb>,
/// Set of visited blocks.
visited: EntitySet<Block>,
/// Stack of EBBs to be visited next, already in `visited`.
stack: Vec<Ebb>,
/// Stack of blocks to be visited next, already in `visited`.
stack: Vec<Block>,
}
impl TopoOrder {
@@ -44,11 +44,11 @@ impl TopoOrder {
self.stack.clear();
}
/// Reset and initialize with a preferred sequence of EBBs. The resulting topological order is
/// guaranteed to contain all of the EBBs in `preferred` as well as any dominators.
pub fn reset<Ebbs>(&mut self, preferred: Ebbs)
/// Reset and initialize with a preferred sequence of blocks. The resulting topological order is
/// guaranteed to contain all of the blocks in `preferred` as well as any dominators.
pub fn reset<Blocks>(&mut self, preferred: Blocks)
where
Ebbs: IntoIterator<Item = Ebb>,
Blocks: IntoIterator<Item = Block>,
{
self.preferred.clear();
self.preferred.extend(preferred);
@@ -57,27 +57,29 @@ impl TopoOrder {
self.stack.clear();
}
/// Get the next EBB in the topological order.
/// Get the next block in the topological order.
///
/// Two things are guaranteed about the EBBs returned by this function:
/// Two things are guaranteed about the blocks returned by this function:
///
/// - All EBBs in the `preferred` iterator given to `reset` will be returned.
/// - All dominators are visited before the EBB returned.
pub fn next(&mut self, layout: &Layout, domtree: &DominatorTree) -> Option<Ebb> {
self.visited.resize(layout.ebb_capacity());
/// - All blocks in the `preferred` iterator given to `reset` will be returned.
/// - All dominators are visited before the block returned.
pub fn next(&mut self, layout: &Layout, domtree: &DominatorTree) -> Option<Block> {
self.visited.resize(layout.block_capacity());
// Any entries in `stack` should be returned immediately. They have already been added to
// `visited`.
while self.stack.is_empty() {
match self.preferred.get(self.next).cloned() {
None => return None,
Some(mut ebb) => {
// We have the next EBB in the preferred order.
Some(mut block) => {
// We have the next block in the preferred order.
self.next += 1;
// Push it along with any non-visited dominators.
while self.visited.insert(ebb) {
self.stack.push(ebb);
match domtree.idom(ebb) {
Some(idom) => ebb = layout.inst_ebb(idom).expect("idom not in layout"),
while self.visited.insert(block) {
self.stack.push(block);
match domtree.idom(block) {
Some(idom) => {
block = layout.inst_block(idom).expect("idom not in layout")
}
None => break,
}
}
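The core of `next` is the dominator-first stack discipline: each preferred block is pushed together with its not-yet-visited dominators, then popped so dominators come out first. A stand-alone sketch, assuming the immediate-dominator relation is given as an array with the entry mapping to itself:

```rust
fn topo_order(preferred: &[usize], idom: &[usize]) -> Vec<usize> {
    let mut visited = vec![false; idom.len()];
    let mut order = Vec::new();
    for &start in preferred {
        let mut stack = Vec::new();
        let mut b = start;
        // Push the block along with any non-visited dominators.
        while !visited[b] {
            visited[b] = true;
            stack.push(b);
            if b == idom[b] { break; } // reached the entry block
            b = idom[b];
        }
        while let Some(b) = stack.pop() {
            order.push(b); // dominators come off the stack first
        }
    }
    order
}

fn main() {
    // Entry 0 dominates 1, which dominates 2.
    let idom = [0, 0, 1];
    assert_eq!(topo_order(&[2], &idom), vec![0, 1, 2]);
}
```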
@@ -105,32 +107,32 @@ mod tests {
let mut topo = TopoOrder::new();
assert_eq!(topo.next(&func.layout, &domtree), None);
topo.reset(func.layout.ebbs());
topo.reset(func.layout.blocks());
assert_eq!(topo.next(&func.layout, &domtree), None);
}
#[test]
fn simple() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let ebb1 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let block1 = func.dfg.make_block();
{
let mut cur = FuncCursor::new(&mut func);
cur.insert_ebb(ebb0);
cur.ins().jump(ebb1, &[]);
cur.insert_ebb(ebb1);
cur.ins().jump(ebb1, &[]);
cur.insert_block(block0);
cur.ins().jump(block1, &[]);
cur.insert_block(block1);
cur.ins().jump(block1, &[]);
}
let cfg = ControlFlowGraph::with_function(&func);
let domtree = DominatorTree::with_function(&func, &cfg);
let mut topo = TopoOrder::new();
topo.reset(iter::once(ebb1));
assert_eq!(topo.next(&func.layout, &domtree), Some(ebb0));
assert_eq!(topo.next(&func.layout, &domtree), Some(ebb1));
topo.reset(iter::once(block1));
assert_eq!(topo.next(&func.layout, &domtree), Some(block0));
assert_eq!(topo.next(&func.layout, &domtree), Some(block1));
assert_eq!(topo.next(&func.layout, &domtree), None);
}
}

View File

@@ -9,7 +9,7 @@ use log::debug;
/// Eliminate unreachable code.
///
/// This pass deletes whole EBBs that can't be reached from the entry block. It does not delete
/// This pass deletes whole blocks that can't be reached from the entry block. It does not delete
/// individual instructions whose results are unused.
///
/// The reachability analysis is performed by the dominator tree analysis.
@@ -20,27 +20,27 @@ pub fn eliminate_unreachable_code(
) {
let _tt = timing::unreachable_code();
let mut pos = FuncCursor::new(func);
while let Some(ebb) = pos.next_ebb() {
if domtree.is_reachable(ebb) {
while let Some(block) = pos.next_block() {
if domtree.is_reachable(block) {
continue;
}
debug!("Eliminating unreachable {}", ebb);
debug!("Eliminating unreachable {}", block);
// Move the cursor out of the way and make sure the next loop iteration goes to the right
// EBB.
pos.prev_ebb();
// block.
pos.prev_block();
// Remove all instructions from `ebb`.
while let Some(inst) = pos.func.layout.first_inst(ebb) {
// Remove all instructions from `block`.
while let Some(inst) = pos.func.layout.first_inst(block) {
debug!(" - {}", pos.func.dfg.display_inst(inst, None));
pos.func.layout.remove_inst(inst);
}
// Once the EBB is completely empty, we can update the CFG which removes it from any
// Once the block is completely empty, we can update the CFG which removes it from any
// predecessor lists.
cfg.recompute_ebb(pos.func, ebb);
cfg.recompute_block(pos.func, block);
// Finally, remove the EBB from the layout.
pos.func.layout.remove_ebb(ebb);
// Finally, remove the block from the layout.
pos.func.layout.remove_block(block);
}
}
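Conceptually the pass is a reachability sweep from the entry block followed by deletion of everything else (the real pass reuses the dominator tree's reachability rather than recomputing it). A toy version over a successor-list CFG:

```rust
fn eliminate_unreachable(succs: &[Vec<usize>], entry: usize) -> Vec<usize> {
    let mut reachable = vec![false; succs.len()];
    let mut stack = vec![entry];
    // Depth-first reachability from the entry block.
    while let Some(b) = stack.pop() {
        if !reachable[b] {
            reachable[b] = true;
            stack.extend(&succs[b]);
        }
    }
    // The surviving layout keeps only reachable blocks, in order.
    (0..succs.len()).filter(|&b| reachable[b]).collect()
}

fn main() {
    // block0 -> block1; block2 is unreachable.
    let succs = vec![vec![1], vec![], vec![1]];
    assert_eq!(eliminate_unreachable(&succs, 0), vec![0, 1]);
}
```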

View File

@@ -93,8 +93,8 @@ where
{
let values_labels = build_value_labels_index::<T>(func);
let mut ebbs = func.layout.ebbs().collect::<Vec<_>>();
ebbs.sort_by_key(|ebb| func.offsets[*ebb]); // Ensure inst offsets always increase
let mut blocks = func.layout.blocks().collect::<Vec<_>>();
blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
let encinfo = isa.encoding_info();
let values_locations = &func.locations;
let liveness_ranges = regalloc.liveness().ranges();
@@ -117,16 +117,16 @@ where
let mut end_offset = 0;
let mut tracked_values: Vec<(Value, ValueLabel, u32, ValueLoc)> = Vec::new();
let mut divert = RegDiversions::new();
for ebb in ebbs {
divert.at_ebb(&func.entry_diversions, ebb);
for block in blocks {
divert.at_block(&func.entry_diversions, block);
let mut last_srcloc: Option<T> = None;
for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) {
for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
divert.apply(&func.dfg[inst]);
end_offset = offset + size;
// Remove killed values.
tracked_values.retain(|(x, label, start_offset, last_loc)| {
let range = liveness_ranges.get(*x);
if range.expect("value").killed_at(inst, ebb, &func.layout) {
if range.expect("value").killed_at(inst, block, &func.layout) {
add_range(*label, (*start_offset, end_offset), *last_loc);
return false;
}
@@ -173,7 +173,7 @@ where
// Ignore dead/inactive Values.
let range = liveness_ranges.get(*v);
match range {
Some(r) => r.reaches_use(inst, ebb, &func.layout),
Some(r) => r.reaches_use(inst, block, &func.layout),
None => false,
}
});

View File

@@ -2,7 +2,7 @@
use crate::dbg::DisplayList;
use crate::dominator_tree::{DominatorTree, DominatorTreePreorder};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::{ExpandedProgramPoint, Function};
use crate::regalloc::liveness::Liveness;
use crate::regalloc::virtregs::VirtRegs;
@@ -13,7 +13,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
///
/// Conventional SSA form is represented in Cranelift with the help of virtual registers:
///
/// - Two values are said to be *PHI-related* if one is an EBB argument and the other is passed as
/// - Two values are said to be *PHI-related* if one is a block argument and the other is passed as
/// a branch argument in a location that matches the first value.
/// - PHI-related values must belong to the same virtual register.
/// - Two values in the same virtual register must not have overlapping live ranges.
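The virtual-register grouping implied by these rules is naturally a union-find: every branch argument is merged with the block parameter it feeds, and `same_class` compares roots. A small sketch with hypothetical value numbers:

```rust
struct VirtRegs { parent: Vec<usize> }

impl VirtRegs {
    fn new(n: usize) -> Self { Self { parent: (0..n).collect() } }
    fn find(&mut self, v: usize) -> usize {
        if self.parent[v] != v {
            let root = self.find(self.parent[v]);
            self.parent[v] = root; // path compression
        }
        self.parent[v]
    }
    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        self.parent[ra] = rb;
    }
    fn same_class(&mut self, a: usize, b: usize) -> bool {
        self.find(a) == self.find(b)
    }
}

fn main() {
    let mut vr = VirtRegs::new(4);
    vr.union(0, 2); // v0 passed as a branch arg to a block whose param is v2
    assert!(vr.same_class(0, 2));
    assert!(!vr.same_class(0, 3));
}
```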
@@ -76,10 +76,10 @@ impl<'a> CssaVerifier<'a> {
// Check topological ordering with the previous values in the virtual register.
let def: ExpandedProgramPoint = self.func.dfg.value_def(val).into();
let def_ebb = self.func.layout.pp_ebb(def);
let def_block = self.func.layout.pp_block(def);
for &prev_val in &values[0..idx] {
let prev_def: ExpandedProgramPoint = self.func.dfg.value_def(prev_val).into();
let prev_ebb = self.func.layout.pp_ebb(prev_def);
let prev_block = self.func.layout.pp_block(prev_def);
if prev_def == def {
return errors.fatal((
@@ -95,7 +95,7 @@ impl<'a> CssaVerifier<'a> {
}
// Enforce topological ordering of defs in the virtual register.
if self.preorder.dominates(def_ebb, prev_ebb)
if self.preorder.dominates(def_block, prev_block)
&& self.domtree.dominates(def, prev_def, &self.func.layout)
{
return errors.fatal((
@@ -115,12 +115,12 @@ impl<'a> CssaVerifier<'a> {
// We only have to check against the nearest dominating value.
for &prev_val in values[0..idx].iter().rev() {
let prev_def: ExpandedProgramPoint = self.func.dfg.value_def(prev_val).into();
let prev_ebb = self.func.layout.pp_ebb(prev_def);
let prev_block = self.func.layout.pp_block(prev_def);
if self.preorder.dominates(prev_ebb, def_ebb)
if self.preorder.dominates(prev_block, def_block)
&& self.domtree.dominates(prev_def, def, &self.func.layout)
{
if self.liveness[prev_val].overlaps_def(def, def_ebb, &self.func.layout) {
if self.liveness[prev_val].overlaps_def(def, def_block, &self.func.layout) {
return errors.fatal((
val,
format!(
@@ -142,24 +142,24 @@ impl<'a> CssaVerifier<'a> {
}
fn check_cssa(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
for ebb in self.func.layout.ebbs() {
let ebb_params = self.func.dfg.ebb_params(ebb);
for BasicBlock { inst: pred, .. } in self.cfg.pred_iter(ebb) {
for block in self.func.layout.blocks() {
let block_params = self.func.dfg.block_params(block);
for BlockPredecessor { inst: pred, .. } in self.cfg.pred_iter(block) {
let pred_args = self.func.dfg.inst_variable_args(pred);
// This should have been caught by an earlier verifier pass.
assert_eq!(
ebb_params.len(),
block_params.len(),
pred_args.len(),
"Wrong arguments on branch."
);
for (&ebb_param, &pred_arg) in ebb_params.iter().zip(pred_args) {
if !self.virtregs.same_class(ebb_param, pred_arg) {
for (&block_param, &pred_arg) in block_params.iter().zip(pred_args) {
if !self.virtregs.same_class(block_param, pred_arg) {
return errors.fatal((
pred,
format!(
"{} and {} must be in the same virtual register",
ebb_param, pred_arg
block_param, pred_arg
),
));
}

View File

@@ -1,7 +1,7 @@
//! Verify CPU flags values.
use crate::entity::{EntitySet, SecondaryMap};
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir;
use crate::ir::instructions::BranchInfo;
use crate::isa;
@@ -42,33 +42,33 @@ struct FlagsVerifier<'a> {
cfg: &'a ControlFlowGraph,
encinfo: Option<isa::EncInfo>,
/// The single live-in flags value (if any) for each EBB.
livein: SecondaryMap<ir::Ebb, PackedOption<ir::Value>>,
/// The single live-in flags value (if any) for each block.
livein: SecondaryMap<ir::Block, PackedOption<ir::Value>>,
}
impl<'a> FlagsVerifier<'a> {
fn check(&mut self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
// List of EBBs that need to be processed. EBBs may be re-added to this list when we detect
// List of blocks that need to be processed. Blocks may be re-added to this list when we detect
// that one of their successor blocks needs a live-in flags value.
let mut worklist = EntitySet::with_capacity(self.func.layout.ebb_capacity());
for ebb in self.func.layout.ebbs() {
worklist.insert(ebb);
let mut worklist = EntitySet::with_capacity(self.func.layout.block_capacity());
for block in self.func.layout.blocks() {
worklist.insert(block);
}
while let Some(ebb) = worklist.pop() {
if let Some(value) = self.visit_ebb(ebb, errors)? {
// The EBB has live-in flags. Check if the value changed.
match self.livein[ebb].expand() {
// Revisit any predecessor blocks the first time we see a live-in for `ebb`.
while let Some(block) = worklist.pop() {
if let Some(value) = self.visit_block(block, errors)? {
// The block has live-in flags. Check if the value changed.
match self.livein[block].expand() {
// Revisit any predecessor blocks the first time we see a live-in for `block`.
None => {
self.livein[ebb] = value.into();
for BasicBlock { ebb: pred, .. } in self.cfg.pred_iter(ebb) {
self.livein[block] = value.into();
for BlockPredecessor { block: pred, .. } in self.cfg.pred_iter(block) {
worklist.insert(pred);
}
}
Some(old) if old != value => {
return errors.fatal((
ebb,
block,
format!("conflicting live-in CPU flags: {} and {}", old, value),
));
}
@@ -76,24 +76,24 @@ impl<'a> FlagsVerifier<'a> {
}
} else {
// Existing live-in flags should never be able to disappear.
assert_eq!(self.livein[ebb].expand(), None);
assert_eq!(self.livein[block].expand(), None);
}
}
Ok(())
}
/// Check flags usage in `ebb` and return the live-in flags value, if any.
fn visit_ebb(
/// Check flags usage in `block` and return the live-in flags value, if any.
fn visit_block(
&self,
ebb: ir::Ebb,
block: ir::Block,
errors: &mut VerifierErrors,
) -> VerifierStepResult<Option<ir::Value>> {
// The single currently live flags value.
let mut live_val = None;
// Visit instructions backwards so we can track liveness accurately.
for inst in self.func.layout.ebb_insts(ebb).rev() {
for inst in self.func.layout.block_insts(block).rev() {
// Check if `inst` interferes with existing live flags.
if let Some(live) = live_val {
for &res in self.func.dfg.inst_results(inst) {
@@ -130,7 +130,7 @@ impl<'a> FlagsVerifier<'a> {
}
}
// Include live-in flags to successor EBBs.
// Include live-in flags to successor blocks.
match self.func.dfg.analyze_branch(inst) {
BranchInfo::NotABranch => {}
BranchInfo::SingleDest(dest, _) => {

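The check above is a classic fixed-point worklist: when a block's live-in flags value first appears, its predecessors are re-queued. A generic stand-alone sketch of that loop, with illustrative types and a caller-supplied `compute` function:

```rust
use std::collections::HashSet;

fn fixed_point(
    n_blocks: usize,
    preds: &[Vec<usize>],
    compute: impl Fn(usize, &[Option<u32>]) -> Option<u32>,
) -> Vec<Option<u32>> {
    let mut livein = vec![None; n_blocks];
    let mut worklist: Vec<usize> = (0..n_blocks).collect();
    let mut queued: HashSet<usize> = worklist.iter().copied().collect();
    while let Some(b) = worklist.pop() {
        queued.remove(&b);
        if let Some(v) = compute(b, &livein) {
            if livein[b].is_none() {
                livein[b] = Some(v);
                // Revisit predecessors the first time a live-in appears.
                for &p in &preds[b] {
                    if queued.insert(p) {
                        worklist.push(p);
                    }
                }
            }
        }
    }
    livein
}

fn main() {
    // block0 -> block1, and block1 requires a live-in flags value 7.
    let preds = vec![vec![], vec![0]];
    let result = fixed_point(2, &preds, |b, _| if b == 1 { Some(7) } else { None });
    assert_eq!(result, vec![None, Some(7)]);
}
```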
View File

@@ -1,6 +1,6 @@
//! Liveness verifier.
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir::entities::AnyEntity;
use crate::ir::{ExpandedProgramPoint, Function, ProgramPoint, Value};
use crate::isa::TargetIsa;
@@ -16,7 +16,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
/// - All values in the program must have a live range.
/// - The live range def point must match where the value is defined.
/// - The live range must reach all uses.
/// - When a live range is live-in to an EBB, it must be live at all the predecessors.
/// - When a live range is live-in to a block, it must be live at all the predecessors.
/// - The live range affinity must be compatible with encoding constraints.
///
/// We don't verify that live ranges are minimal. This would require recomputing live ranges for
@@ -35,7 +35,7 @@ pub fn verify_liveness(
cfg,
liveness,
};
verifier.check_ebbs(errors)?;
verifier.check_blocks(errors)?;
verifier.check_insts(errors)?;
Ok(())
}
@@ -48,17 +48,18 @@ struct LivenessVerifier<'a> {
}
impl<'a> LivenessVerifier<'a> {
/// Check all EBB arguments.
fn check_ebbs(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
for ebb in self.func.layout.ebbs() {
for &val in self.func.dfg.ebb_params(ebb) {
/// Check all block arguments.
fn check_blocks(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
for block in self.func.layout.blocks() {
for &val in self.func.dfg.block_params(block) {
let lr = match self.liveness.get(val) {
Some(lr) => lr,
None => {
return errors.fatal((ebb, format!("EBB arg {} has no live range", val)))
return errors
.fatal((block, format!("block arg {} has no live range", val)))
}
};
self.check_lr(ebb.into(), val, lr, errors)?;
self.check_lr(block.into(), val, lr, errors)?;
}
}
Ok(())
@@ -66,8 +67,8 @@ impl<'a> LivenessVerifier<'a> {
/// Check all instructions.
fn check_insts(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
for ebb in self.func.layout.ebbs() {
for inst in self.func.layout.ebb_insts(ebb) {
for block in self.func.layout.blocks() {
for inst in self.func.layout.block_insts(block) {
let encoding = self.func.encodings[inst];
// Check the defs.
@@ -110,8 +111,8 @@ impl<'a> LivenessVerifier<'a> {
None => return errors.fatal((inst, format!("{} has no live range", val))),
};
debug_assert!(self.func.layout.inst_ebb(inst).unwrap() == ebb);
if !lr.reaches_use(inst, ebb, &self.func.layout) {
debug_assert!(self.func.layout.inst_block(inst).unwrap() == block);
if !lr.reaches_use(inst, block, &self.func.layout) {
return errors.fatal((inst, format!("{} is not live at this use", val)));
}
@@ -143,7 +144,7 @@ impl<'a> LivenessVerifier<'a> {
let l = &self.func.layout;
let loc: AnyEntity = match def.into() {
ExpandedProgramPoint::Ebb(e) => e.into(),
ExpandedProgramPoint::Block(e) => e.into(),
ExpandedProgramPoint::Inst(i) => i.into(),
};
if lr.def() != def {
@@ -159,66 +160,70 @@ impl<'a> LivenessVerifier<'a> {
return Ok(());
}
}
let def_ebb = match def.into() {
ExpandedProgramPoint::Ebb(e) => e,
ExpandedProgramPoint::Inst(i) => l.inst_ebb(i).unwrap(),
let def_block = match def.into() {
ExpandedProgramPoint::Block(e) => e,
ExpandedProgramPoint::Inst(i) => l.inst_block(i).unwrap(),
};
match lr.def_local_end().into() {
ExpandedProgramPoint::Ebb(e) => {
ExpandedProgramPoint::Block(e) => {
return errors.fatal((
loc,
format!("Def local range for {} can't end at {}", val, e),
));
}
ExpandedProgramPoint::Inst(i) => {
if self.func.layout.inst_ebb(i) != Some(def_ebb) {
return errors.fatal((loc, format!("Def local end for {} in wrong ebb", val)));
if self.func.layout.inst_block(i) != Some(def_block) {
return errors
.fatal((loc, format!("Def local end for {} in wrong block", val)));
}
}
}
// Now check the live-in intervals against the CFG.
for (mut ebb, end) in lr.liveins() {
if !l.is_ebb_inserted(ebb) {
for (mut block, end) in lr.liveins() {
if !l.is_block_inserted(block) {
return errors.fatal((
loc,
format!("{} livein at {} which is not in the layout", val, ebb),
format!("{} livein at {} which is not in the layout", val, block),
));
}
let end_ebb = match l.inst_ebb(end) {
let end_block = match l.inst_block(end) {
Some(e) => e,
None => {
return errors.fatal((
loc,
format!(
"{} livein for {} ends at {} which is not in the layout",
val, ebb, end
val, block, end
),
));
}
};
// Check all the EBBs in the interval independently.
// Check all the blocks in the interval independently.
loop {
// If `val` is live-in at `ebb`, it must be live at all the predecessors.
for BasicBlock { inst: pred, ebb } in self.cfg.pred_iter(ebb) {
if !lr.reaches_use(pred, ebb, &self.func.layout) {
// If `val` is live-in at `block`, it must be live at all the predecessors.
for BlockPredecessor { inst: pred, block } in self.cfg.pred_iter(block) {
if !lr.reaches_use(pred, block, &self.func.layout) {
return errors.fatal((
pred,
format!("{} is live in to {} but not live at predecessor", val, ebb),
format!(
"{} is live in to {} but not live at predecessor",
val, block
),
));
}
}
if ebb == end_ebb {
if block == end_block {
break;
}
ebb = match l.next_ebb(ebb) {
block = match l.next_block(block) {
Some(e) => e,
None => {
return errors.fatal((
loc,
format!("end of {} livein ({}) never reached", val, end_ebb),
format!("end of {} livein ({}) never reached", val, end_block),
));
}
};

View File

@@ -15,7 +15,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
/// instruction encoding recipes.
///
/// Values can be temporarily diverted to a different location by using the `regmove`, `regspill`,
/// and `regfill` instructions, but only inside an EBB.
/// and `regfill` instructions, but only inside a block.
///
/// If a liveness analysis is provided, it is used to verify that there are no active register
/// diversions across control flow edges.
@@ -54,11 +54,11 @@ impl<'a> LocationVerifier<'a> {
let dfg = &self.func.dfg;
let mut divert = RegDiversions::new();
for ebb in self.func.layout.ebbs() {
divert.at_ebb(&self.func.entry_diversions, ebb);
for block in self.func.layout.blocks() {
divert.at_block(&self.func.entry_diversions, block);
let mut is_after_branch = false;
for inst in self.func.layout.ebb_insts(ebb) {
for inst in self.func.layout.block_insts(block) {
let enc = self.func.encodings[inst];
if enc.is_legal() {
@@ -332,24 +332,24 @@ impl<'a> LocationVerifier<'a> {
"No branch information for {}",
dfg.display_inst(inst, self.isa)
),
SingleDest(ebb, _) => {
let unique_predecessor = self.cfg.pred_iter(ebb).count() == 1;
SingleDest(block, _) => {
let unique_predecessor = self.cfg.pred_iter(block).count() == 1;
let mut val_to_remove = vec![];
for (&value, d) in divert.iter() {
let lr = &liveness[value];
if is_after_branch && unique_predecessor {
// Forward diversions based on the targeted branch.
if !lr.is_livein(ebb, &self.func.layout) {
if !lr.is_livein(block, &self.func.layout) {
val_to_remove.push(value)
}
} else if lr.is_livein(ebb, &self.func.layout) {
} else if lr.is_livein(block, &self.func.layout) {
return errors.fatal((
inst,
format!(
"SingleDest: {} is diverted to {} and live in to {}",
value,
d.to.display(&self.reginfo),
ebb,
block,
),
));
}
@@ -358,34 +358,34 @@ impl<'a> LocationVerifier<'a> {
for val in val_to_remove.into_iter() {
divert.remove(val);
}
debug_assert!(divert.check_ebb_entry(&self.func.entry_diversions, ebb));
debug_assert!(divert.check_block_entry(&self.func.entry_diversions, block));
}
}
Table(jt, ebb) => {
Table(jt, block) => {
for (&value, d) in divert.iter() {
let lr = &liveness[value];
if let Some(ebb) = ebb {
if lr.is_livein(ebb, &self.func.layout) {
if let Some(block) = block {
if lr.is_livein(block, &self.func.layout) {
return errors.fatal((
inst,
format!(
"Table.default: {} is diverted to {} and live in to {}",
value,
d.to.display(&self.reginfo),
ebb,
block,
),
));
}
}
for ebb in self.func.jump_tables[jt].iter() {
if lr.is_livein(*ebb, &self.func.layout) {
for block in self.func.jump_tables[jt].iter() {
if lr.is_livein(*block, &self.func.layout) {
return errors.fatal((
inst,
format!(
"Table.case: {} is diverted to {} and live in to {}",
value,
d.to.display(&self.reginfo),
ebb,
block,
),
));
}
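Distilled, the branch checks above say: an active diversion is an error if the diverted value is live-in at the destination. A reduced sketch with hypothetical types (`livein` stands in for the liveness query):

```rust
use std::collections::{HashMap, HashSet};

fn check_branch(
    diversions: &HashMap<u32, &'static str>,
    livein: &HashSet<u32>,
    dest: u32,
) -> Result<(), String> {
    for (&value, loc) in diversions {
        if livein.contains(&value) {
            return Err(format!(
                "v{} is diverted to {} and live in to block{}",
                value, loc, dest
            ));
        }
    }
    Ok(())
}

fn main() {
    let mut diversions = HashMap::new();
    diversions.insert(3, "%rax"); // v3 temporarily moved to %rax
    let livein: HashSet<u32> = [3].into_iter().collect();
    assert!(check_branch(&diversions, &livein, 1).is_err());
}
```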

View File

@@ -1,40 +1,40 @@
//! A verifier for ensuring that functions are well formed.
//! It verifies:
//!
//! EBB integrity
//! Block integrity
//!
//! - All instructions reached from the `ebb_insts` iterator must belong to
//! the EBB as reported by `inst_ebb()`.
//! - Every EBB must end in a terminator instruction, and no other instruction
//! - All instructions reached from the `block_insts` iterator must belong to
//! the block as reported by `inst_block()`.
//! - Every block must end in a terminator instruction, and no other instruction
//! can be a terminator.
//! - Every value in the `ebb_params` iterator belongs to the EBB as reported by `value_ebb`.
//! - Every value in the `block_params` iterator belongs to the block as reported by `value_block`.
//!
//! Instruction integrity
//!
//! - The instruction format must match the opcode.
//! - All result values must be created for multi-valued instructions.
//! - All referenced entities must exist. (Values, EBBs, stack slots, ...)
//! - All referenced entities must exist. (Values, blocks, stack slots, ...)
//! - Instructions must not reference (e.g. branch to) the entry block.
//!
//! SSA form
//!
//! - Values must be defined by an instruction that exists and that is inserted in
//! an EBB, or be an argument of an existing EBB.
//! a block, or be an argument of an existing block.
//! - Values used by an instruction must dominate the instruction.
//!
//! Control flow graph and dominator tree integrity:
//!
//! - All predecessors in the CFG must be branches to the EBB.
//! - All branches to an EBB must be present in the CFG.
//! - All predecessors in the CFG must be branches to the block.
//! - All branches to a block must be present in the CFG.
//! - A recomputed dominator tree is identical to the existing one.
//!
//! Type checking
//!
//! - Compare input and output values against the opcode's type constraints.
//! For polymorphic opcodes, determine the controlling type variable first.
//! - Branches and jumps must pass arguments to destination EBBs that match the
//! - Branches and jumps must pass arguments to destination blocks that match the
//! expected types exactly. The number of arguments must match.
//! - All EBBs in a jump table must take no arguments.
//! - All blocks in a jump table must take no arguments.
//! - Function calls are type checked against their signature.
//! - The entry block must take arguments that match the signature of the current
//! function.
@@ -60,12 +60,12 @@ use self::flags::verify_flags;
use crate::dbg::DisplayList;
use crate::dominator_tree::DominatorTree;
use crate::entity::SparseSet;
use crate::flowgraph::{BasicBlock, ControlFlowGraph};
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir;
use crate::ir::entities::AnyEntity;
use crate::ir::instructions::{BranchInfo, CallInfo, InstructionFormat, ResolvedConstraint};
use crate::ir::{
types, ArgumentLoc, Ebb, FuncRef, Function, GlobalValue, Inst, InstructionData, JumpTable,
types, ArgumentLoc, Block, FuncRef, Function, GlobalValue, Inst, InstructionData, JumpTable,
Opcode, SigRef, StackSlot, StackSlotKind, Type, Value, ValueDef, ValueList, ValueLoc,
};
use crate::isa::TargetIsa;
@@ -495,30 +495,30 @@ impl<'a> Verifier<'a> {
fn verify_jump_tables(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
for (jt, jt_data) in &self.func.jump_tables {
for &ebb in jt_data.iter() {
self.verify_ebb(jt, ebb, errors)?;
for &block in jt_data.iter() {
self.verify_block(jt, block, errors)?;
}
}
Ok(())
}
/// Check that the given EBB can be encoded as a BB, by checking that only
/// branching instructions are ending the EBB.
fn encodable_as_bb(&self, ebb: Ebb, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
match self.func.is_ebb_basic(ebb) {
/// Check that the given block can be encoded as a BB, by checking that only
/// branching instructions are ending the block.
fn encodable_as_bb(&self, block: Block, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
match self.func.is_block_basic(block) {
Ok(()) => Ok(()),
Err((inst, message)) => errors.fatal((inst, self.context(inst), message)),
}
}
fn ebb_integrity(
fn block_integrity(
&self,
ebb: Ebb,
block: Block,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let is_terminator = self.func.dfg[inst].opcode().is_terminator();
let is_last_inst = self.func.layout.last_inst(ebb) == Some(inst);
let is_last_inst = self.func.layout.last_inst(block) == Some(inst);
if is_terminator && !is_last_inst {
// Terminating instructions only occur at the end of blocks.
@@ -527,30 +527,30 @@ impl<'a> Verifier<'a> {
self.context(inst),
format!(
"a terminator instruction was encountered before the end of {}",
ebb
block
),
));
}
if is_last_inst && !is_terminator {
return errors.fatal((ebb, "block does not end in a terminator instruction"));
return errors.fatal((block, "block does not end in a terminator instruction"));
}
// Instructions belong to the correct ebb.
let inst_ebb = self.func.layout.inst_ebb(inst);
if inst_ebb != Some(ebb) {
// Instructions belong to the correct block.
let inst_block = self.func.layout.inst_block(inst);
if inst_block != Some(block) {
return errors.fatal((
inst,
self.context(inst),
format!("should belong to {} not {:?}", ebb, inst_ebb),
format!("should belong to {} not {:?}", block, inst_block),
));
}
// Parameters belong to the correct ebb.
for &arg in self.func.dfg.ebb_params(ebb) {
// Parameters belong to the correct block.
for &arg in self.func.dfg.block_params(block) {
match self.func.dfg.value_def(arg) {
ValueDef::Param(arg_ebb, _) => {
if ebb != arg_ebb {
return errors.fatal((arg, format!("does not belong to {}", ebb)));
ValueDef::Param(arg_block, _) => {
if block != arg_block {
return errors.fatal((arg, format!("does not belong to {}", block)));
}
}
_ => {
@@ -656,13 +656,13 @@ impl<'a> Verifier<'a> {
ref args,
..
} => {
self.verify_ebb(inst, destination, errors)?;
self.verify_block(inst, destination, errors)?;
self.verify_value_list(inst, args, errors)?;
}
BranchTable {
table, destination, ..
} => {
self.verify_ebb(inst, destination, errors)?;
self.verify_block(inst, destination, errors)?;
self.verify_jump_table(inst, table, errors)?;
}
BranchTableBase { table, .. }
@@ -775,18 +775,18 @@ impl<'a> Verifier<'a> {
Ok(())
}
fn verify_ebb(
fn verify_block(
&self,
loc: impl Into<AnyEntity>,
e: Ebb,
e: Block,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.ebb_is_valid(e) || !self.func.layout.is_ebb_inserted(e) {
return errors.fatal((loc, format!("invalid ebb reference {}", e)));
if !self.func.dfg.block_is_valid(e) || !self.func.layout.is_block_inserted(e) {
return errors.fatal((loc, format!("invalid block reference {}", e)));
}
if let Some(entry_block) = self.func.layout.entry_block() {
if e == entry_block {
return errors.fatal((loc, format!("invalid reference to entry ebb {}", e)));
return errors.fatal((loc, format!("invalid reference to entry block {}", e)));
}
}
Ok(())
@@ -947,8 +947,8 @@ impl<'a> Verifier<'a> {
self.verify_value(loc_inst, v, errors)?;
let dfg = &self.func.dfg;
let loc_ebb = self.func.layout.pp_ebb(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_ebb);
let loc_block = self.func.layout.pp_block(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_block);
// SSA form
match dfg.value_def(v) {
@@ -961,12 +961,12 @@ impl<'a> Verifier<'a> {
format!("{} is defined by invalid instruction {}", v, def_inst),
));
}
// Defining instruction is inserted in an EBB.
if self.func.layout.inst_ebb(def_inst) == None {
// Defining instruction is inserted in a block.
if self.func.layout.inst_block(def_inst) == None {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which has no EBB", v, def_inst),
format!("{} is defined by {} which has no block", v, def_inst),
));
}
// Defining instruction dominates the instruction that uses the value.
@@ -990,33 +990,33 @@ impl<'a> Verifier<'a> {
}
}
}
ValueDef::Param(ebb, _) => {
// Value is defined by an existing EBB.
if !dfg.ebb_is_valid(ebb) {
ValueDef::Param(block, _) => {
// Value is defined by an existing block.
if !dfg.block_is_valid(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid EBB {}", v, ebb),
format!("{} is defined by invalid block {}", v, block),
));
}
// Defining EBB is inserted in the layout
if !self.func.layout.is_ebb_inserted(ebb) {
// Defining block is inserted in the layout
if !self.func.layout.is_block_inserted(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which is not in the layout", v, ebb),
format!("{} is defined by {} which is not in the layout", v, block),
));
}
// The defining EBB dominates the instruction using this value.
// The defining block dominates the instruction using this value.
if is_reachable
&& !self
.expected_domtree
.dominates(ebb, loc_inst, &self.func.layout)
.dominates(block, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value arg from non-dominating {}", ebb),
format!("uses value arg from non-dominating {}", block),
));
}
}
@@ -1081,17 +1081,17 @@ impl<'a> Verifier<'a> {
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
// We consider two `DominatorTree`s to be equal if they return the same immediate
// dominator for each EBB. Therefore the current domtree is valid if it matches the freshly
// dominator for each block. Therefore the current domtree is valid if it matches the freshly
// computed one.
for ebb in self.func.layout.ebbs() {
let expected = self.expected_domtree.idom(ebb);
let got = domtree.idom(ebb);
for block in self.func.layout.blocks() {
let expected = self.expected_domtree.idom(block);
let got = domtree.idom(block);
if got != expected {
return errors.fatal((
ebb,
block,
format!(
"invalid domtree, expected idom({}) = {:?}, got {:?}",
ebb, expected, got
block, expected, got
),
));
}
@@ -1100,37 +1100,37 @@ impl<'a> Verifier<'a> {
if domtree.cfg_postorder().len() != self.expected_domtree.cfg_postorder().len() {
return errors.fatal((
AnyEntity::Function,
"incorrect number of Ebbs in postorder traversal",
"incorrect number of Blocks in postorder traversal",
));
}
for (index, (&test_ebb, &true_ebb)) in domtree
for (index, (&test_block, &true_block)) in domtree
.cfg_postorder()
.iter()
.zip(self.expected_domtree.cfg_postorder().iter())
.enumerate()
{
if test_ebb != true_ebb {
if test_block != true_block {
return errors.fatal((
test_ebb,
test_block,
format!(
"invalid domtree, postorder ebb number {} should be {}, got {}",
index, true_ebb, test_ebb
"invalid domtree, postorder block number {} should be {}, got {}",
index, true_block, test_block
),
));
}
}
// We verify rpo_cmp on pairs of adjacent ebbs in the postorder
for (&prev_ebb, &next_ebb) in domtree.cfg_postorder().iter().adjacent_pairs() {
// We verify rpo_cmp on pairs of adjacent blocks in the postorder
for (&prev_block, &next_block) in domtree.cfg_postorder().iter().adjacent_pairs() {
if self
.expected_domtree
.rpo_cmp(prev_ebb, next_ebb, &self.func.layout)
.rpo_cmp(prev_block, next_block, &self.func.layout)
!= Ordering::Greater
{
return errors.fatal((
next_ebb,
next_block,
format!(
"invalid domtree, rpo_cmp does not says {} is greater than {}",
prev_ebb, next_ebb
prev_block, next_block
),
));
}
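The idom comparison reduces to an element-wise check of two arrays of immediate dominators. A sketch with `Option<usize>` per block (`None` for the entry):

```rust
fn domtrees_equal(expected: &[Option<usize>], got: &[Option<usize>]) -> Result<(), String> {
    for (block, (e, g)) in expected.iter().zip(got).enumerate() {
        if e != g {
            return Err(format!(
                "invalid domtree, expected idom(block{}) = {:?}, got {:?}",
                block, e, g
            ));
        }
    }
    Ok(())
}

fn main() {
    let expected = vec![None, Some(0), Some(1)];
    let got = vec![None, Some(0), Some(0)]; // disagrees on block2's idom
    assert!(domtrees_equal(&expected, &got).is_err());
}
```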
@@ -1139,26 +1139,26 @@ impl<'a> Verifier<'a> {
}
fn typecheck_entry_block_params(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(ebb) = self.func.layout.entry_block() {
if let Some(block) = self.func.layout.entry_block() {
let expected_types = &self.func.signature.params;
let ebb_param_count = self.func.dfg.num_ebb_params(ebb);
let block_param_count = self.func.dfg.num_block_params(block);
if ebb_param_count != expected_types.len() {
if block_param_count != expected_types.len() {
return errors.fatal((
ebb,
block,
format!(
"entry block parameters ({}) must match function signature ({})",
ebb_param_count,
block_param_count,
expected_types.len()
),
));
}
for (i, &arg) in self.func.dfg.ebb_params(ebb).iter().enumerate() {
for (i, &arg) in self.func.dfg.block_params(block).iter().enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_types[i].value_type {
errors.report((
ebb,
block,
format!(
"entry block parameter {} expected to have type {}, got {}",
i, expected_types[i], arg_type
@@ -1295,38 +1295,38 @@ impl<'a> Verifier<'a> {
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
match self.func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(ebb, _) => {
BranchInfo::SingleDest(block, _) => {
let iter = self
.func
.dfg
.ebb_params(ebb)
.block_params(block)
.iter()
.map(|&v| self.func.dfg.value_type(v));
self.typecheck_variable_args_iterator(inst, iter, errors)?;
}
BranchInfo::Table(table, ebb) => {
if let Some(ebb) = ebb {
let arg_count = self.func.dfg.num_ebb_params(ebb);
BranchInfo::Table(table, block) => {
if let Some(block) = block {
let arg_count = self.func.dfg.num_block_params(block);
if arg_count != 0 {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"takes no arguments, but had target {} with {} arguments",
ebb, arg_count,
block, arg_count,
),
));
}
}
for ebb in self.func.jump_tables[table].iter() {
let arg_count = self.func.dfg.num_ebb_params(*ebb);
for block in self.func.jump_tables[table].iter() {
let arg_count = self.func.dfg.num_block_params(*block);
if arg_count != 0 {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"takes no arguments, but had target {} with {} arguments",
ebb, arg_count,
block, arg_count,
),
));
}
@@ -1658,28 +1658,29 @@ impl<'a> Verifier<'a> {
cfg: &ControlFlowGraph,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let mut expected_succs = BTreeSet::<Ebb>::new();
let mut got_succs = BTreeSet::<Ebb>::new();
let mut expected_succs = BTreeSet::<Block>::new();
let mut got_succs = BTreeSet::<Block>::new();
let mut expected_preds = BTreeSet::<Inst>::new();
let mut got_preds = BTreeSet::<Inst>::new();
for ebb in self.func.layout.ebbs() {
expected_succs.extend(self.expected_cfg.succ_iter(ebb));
got_succs.extend(cfg.succ_iter(ebb));
for block in self.func.layout.blocks() {
expected_succs.extend(self.expected_cfg.succ_iter(block));
got_succs.extend(cfg.succ_iter(block));
let missing_succs: Vec<Ebb> = expected_succs.difference(&got_succs).cloned().collect();
let missing_succs: Vec<Block> =
expected_succs.difference(&got_succs).cloned().collect();
if !missing_succs.is_empty() {
errors.report((
ebb,
block,
format!("cfg lacked the following successor(s) {:?}", missing_succs),
));
continue;
}
let excess_succs: Vec<Ebb> = got_succs.difference(&expected_succs).cloned().collect();
let excess_succs: Vec<Block> = got_succs.difference(&expected_succs).cloned().collect();
if !excess_succs.is_empty() {
errors.report((
ebb,
block,
format!("cfg had unexpected successor(s) {:?}", excess_succs),
));
continue;
@@ -1687,15 +1688,18 @@ impl<'a> Verifier<'a> {
expected_preds.extend(
self.expected_cfg
.pred_iter(ebb)
.map(|BasicBlock { inst, .. }| inst),
.pred_iter(block)
.map(|BlockPredecessor { inst, .. }| inst),
);
got_preds.extend(
cfg.pred_iter(block)
.map(|BlockPredecessor { inst, .. }| inst),
);
got_preds.extend(cfg.pred_iter(ebb).map(|BasicBlock { inst, .. }| inst));
let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
if !missing_preds.is_empty() {
errors.report((
ebb,
block,
format!(
"cfg lacked the following predecessor(s) {:?}",
missing_preds
@@ -1707,7 +1711,7 @@ impl<'a> Verifier<'a> {
let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
if !excess_preds.is_empty() {
errors.report((
ebb,
block,
format!("cfg had unexpected predecessor(s) {:?}", excess_preds),
));
continue;
@@ -1969,12 +1973,12 @@ impl<'a> Verifier<'a> {
self.typecheck_entry_block_params(errors)?;
self.typecheck_function_signature(errors)?;
for ebb in self.func.layout.ebbs() {
if self.func.layout.first_inst(ebb).is_none() {
return errors.fatal((ebb, format!("{} cannot be empty", ebb)));
for block in self.func.layout.blocks() {
if self.func.layout.first_inst(block).is_none() {
return errors.fatal((block, format!("{} cannot be empty", block)));
}
for inst in self.func.layout.ebb_insts(ebb) {
self.ebb_integrity(ebb, inst, errors)?;
for inst in self.func.layout.block_insts(block) {
self.block_integrity(block, inst, errors)?;
self.instruction_integrity(inst, errors)?;
self.verify_safepoint_unused(inst, errors)?;
self.typecheck(inst, errors)?;
@@ -1982,7 +1986,7 @@ impl<'a> Verifier<'a> {
self.immediate_constraints(inst, errors)?;
}
self.encodable_as_bb(ebb, errors)?;
self.encodable_as_bb(block, errors)?;
}
verify_flags(self.func, &self.expected_cfg, self.isa, errors)?;
@@ -2039,20 +2043,20 @@ mod tests {
#[test]
fn bad_instruction_format() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
func.layout.append_ebb(ebb0);
let block0 = func.dfg.make_block();
func.layout.append_block(block0);
let nullary_with_bad_opcode = func.dfg.make_inst(InstructionData::UnaryImm {
opcode: Opcode::F32const,
imm: 0.into(),
});
func.layout.append_inst(nullary_with_bad_opcode, ebb0);
func.layout.append_inst(nullary_with_bad_opcode, block0);
func.layout.append_inst(
func.dfg.make_inst(InstructionData::Jump {
opcode: Opcode::Jump,
destination: ebb0,
destination: block0,
args: EntityList::default(),
}),
ebb0,
block0,
);
let flags = &settings::Flags::new(settings::builder());
let verifier = Verifier::new(&func, flags.into());
@@ -2093,8 +2097,8 @@ mod tests {
fn test_printing_contextual_errors() {
// Build function.
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
func.layout.append_ebb(ebb0);
let block0 = func.dfg.make_block();
func.layout.append_block(block0);
// Build instruction: v0, v1 = iconst 42
let inst = func.dfg.make_inst(InstructionData::UnaryImm {
@@ -2103,7 +2107,7 @@ mod tests {
});
func.dfg.append_result(inst, types::I32);
func.dfg.append_result(inst, types::I32);
func.layout.append_inst(inst, ebb0);
func.layout.append_inst(inst, block0);
// Setup verifier.
let mut errors = VerifierErrors::default();
@@ -2120,16 +2124,16 @@ mod tests {
}
#[test]
fn test_empty_ebb() {
fn test_empty_block() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
func.layout.append_ebb(ebb0);
let block0 = func.dfg.make_block();
func.layout.append_block(block0);
let flags = &settings::Flags::new(settings::builder());
let verifier = Verifier::new(&func, flags.into());
let mut errors = VerifierErrors::default();
let _ = verifier.run(&mut errors);
assert_err_with_msg!(errors, "ebb0 cannot be empty");
assert_err_with_msg!(errors, "block0 cannot be empty");
}
}

View File

@@ -6,8 +6,8 @@
use crate::entity::SecondaryMap;
use crate::ir::entities::AnyEntity;
use crate::ir::{
DataFlowGraph, DisplayFunctionAnnotations, Ebb, Function, Inst, SigRef, Type, Value, ValueDef,
ValueLoc,
Block, DataFlowGraph, DisplayFunctionAnnotations, Function, Inst, SigRef, Type, Value,
ValueDef, ValueLoc,
};
use crate::isa::{RegInfo, TargetIsa};
use crate::packed_option::ReservedValue;
@@ -19,13 +19,13 @@ use core::fmt::{self, Write};
/// A `FuncWriter` used to decorate functions during printing.
pub trait FuncWriter {
/// Write the extended basic block header for the current function.
fn write_ebb_header(
/// Write the basic block header for the current function.
fn write_block_header(
&mut self,
w: &mut dyn Write,
func: &Function,
isa: Option<&dyn TargetIsa>,
ebb: Ebb,
block: Block,
indent: usize,
) -> fmt::Result;
@@ -145,15 +145,15 @@ impl FuncWriter for PlainWriter {
write_instruction(w, func, aliases, isa, inst, indent)
}
fn write_ebb_header(
fn write_block_header(
&mut self,
w: &mut dyn Write,
func: &Function,
isa: Option<&dyn TargetIsa>,
ebb: Ebb,
block: Block,
indent: usize,
) -> fmt::Result {
write_ebb_header(w, func, isa, ebb, indent)
write_block_header(w, func, isa, block, indent)
}
}
@@ -196,11 +196,11 @@ pub fn decorate_function<FW: FuncWriter>(
writeln!(w, " {{")?;
let aliases = alias_map(func);
let mut any = func_w.write_preamble(w, func, regs)?;
for ebb in &func.layout {
for block in &func.layout {
if any {
writeln!(w)?;
}
decorate_ebb(func_w, w, func, &aliases, annotations, ebb)?;
decorate_block(func_w, w, func, &aliases, annotations, block)?;
any = true;
}
writeln!(w, "}}")
@@ -235,24 +235,24 @@ fn write_arg(
/// Write out the basic block header, outdented:
///
/// ebb1:
/// ebb1(v1: i32):
/// ebb10(v4: f64, v5: b1):
/// block1:
/// block1(v1: i32):
/// block10(v4: f64, v5: b1):
///
pub fn write_ebb_header(
pub fn write_block_header(
w: &mut dyn Write,
func: &Function,
isa: Option<&dyn TargetIsa>,
ebb: Ebb,
block: Block,
indent: usize,
) -> fmt::Result {
// The `indent` is the instruction indentation. EBB headers are 4 spaces out from that.
write!(w, "{1:0$}{2}", indent - 4, "", ebb)?;
// The `indent` is the instruction indentation. block headers are 4 spaces out from that.
write!(w, "{1:0$}{2}", indent - 4, "", block)?;
let regs = isa.map(TargetIsa::register_info);
let regs = regs.as_ref();
let mut args = func.dfg.ebb_params(ebb).iter().cloned();
let mut args = func.dfg.block_params(block).iter().cloned();
match args.next() {
None => return writeln!(w, ":"),
Some(arg) => {
@@ -309,13 +309,13 @@ fn write_value_range_markers(
Ok(())
}
fn decorate_ebb<FW: FuncWriter>(
fn decorate_block<FW: FuncWriter>(
func_w: &mut FW,
w: &mut dyn Write,
func: &Function,
aliases: &SecondaryMap<Value, Vec<Value>>,
annotations: &DisplayFunctionAnnotations,
ebb: Ebb,
block: Block,
) -> fmt::Result {
// Indent all instructions if any encodings are present.
let indent = if func.encodings.is_empty() && func.srclocs.is_empty() {
@@ -325,8 +325,8 @@ fn decorate_ebb<FW: FuncWriter>(
};
let isa = annotations.isa;
func_w.write_ebb_header(w, func, isa, ebb, indent)?;
for a in func.dfg.ebb_params(ebb).iter().cloned() {
func_w.write_block_header(w, func, isa, block, indent)?;
for a in func.dfg.block_params(block).iter().cloned() {
write_value_aliases(w, aliases, a, indent)?;
}
@@ -334,7 +334,7 @@ fn decorate_ebb<FW: FuncWriter>(
if !func.offsets.is_empty() {
let encinfo = isa.encoding_info();
let regs = &isa.register_info();
for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) {
for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
func_w.write_instruction(w, func, aliases, Some(isa), inst, indent)?;
if size > 0 {
if let Some(val_ranges) = annotations.value_ranges {
@@ -346,7 +346,7 @@ fn decorate_ebb<FW: FuncWriter>(
}
}
for inst in func.layout.ebb_insts(ebb) {
for inst in func.layout.block_insts(block) {
func_w.write_instruction(w, func, aliases, isa, inst, indent)?;
}
@@ -374,11 +374,11 @@ fn type_suffix(func: &Function, inst: Inst) -> Option<Type> {
// operand, we don't need the type suffix.
if constraints.use_typevar_operand() {
let ctrl_var = inst_data.typevar_operand(&func.dfg.value_lists).unwrap();
let def_ebb = match func.dfg.value_def(ctrl_var) {
ValueDef::Result(instr, _) => func.layout.inst_ebb(instr),
ValueDef::Param(ebb, _) => Some(ebb),
let def_block = match func.dfg.value_def(ctrl_var) {
ValueDef::Result(instr, _) => func.layout.inst_block(instr),
ValueDef::Param(block, _) => Some(block),
};
if def_ebb.is_some() && def_ebb == func.layout.inst_ebb(inst) {
if def_block.is_some() && def_block == func.layout.inst_block(inst) {
return None;
}
}
@@ -533,7 +533,7 @@ pub fn write_operands(
..
} => {
write!(w, " {}", destination)?;
write_ebb_args(w, args.as_slice(pool))
write_block_args(w, args.as_slice(pool))
}
Branch {
destination,
@@ -542,7 +542,7 @@ pub fn write_operands(
} => {
let args = args.as_slice(pool);
write!(w, " {}, {}", args[0], destination)?;
write_ebb_args(w, &args[1..])
write_block_args(w, &args[1..])
}
BranchInt {
cond,
@@ -552,7 +552,7 @@ pub fn write_operands(
} => {
let args = args.as_slice(pool);
write!(w, " {} {}, {}", cond, args[0], destination)?;
write_ebb_args(w, &args[1..])
write_block_args(w, &args[1..])
}
BranchFloat {
cond,
@@ -562,7 +562,7 @@ pub fn write_operands(
} => {
let args = args.as_slice(pool);
write!(w, " {} {}, {}", cond, args[0], destination)?;
write_ebb_args(w, &args[1..])
write_block_args(w, &args[1..])
}
BranchIcmp {
cond,
@@ -572,7 +572,7 @@ pub fn write_operands(
} => {
let args = args.as_slice(pool);
write!(w, " {} {}, {}, {}", cond, args[0], args[1], destination)?;
write_ebb_args(w, &args[2..])
write_block_args(w, &args[2..])
}
BranchTable {
arg,
@@ -714,8 +714,8 @@ pub fn write_operands(
}
}
/// Write EBB args using optional parantheses.
fn write_ebb_args(w: &mut dyn Write, args: &[Value]) -> fmt::Result {
/// Write block args using optional parentheses.
fn write_block_args(w: &mut dyn Write, args: &[Value]) -> fmt::Result {
if args.is_empty() {
Ok(())
} else {
@@ -775,33 +775,33 @@ mod tests {
"function %foo() fast {\n ss0 = explicit_slot 4\n}\n"
);
let ebb = f.dfg.make_ebb();
f.layout.append_ebb(ebb);
let block = f.dfg.make_block();
f.layout.append_block(block);
assert_eq!(
f.to_string(),
"function %foo() fast {\n ss0 = explicit_slot 4\n\nebb0:\n}\n"
"function %foo() fast {\n ss0 = explicit_slot 4\n\nblock0:\n}\n"
);
f.dfg.append_ebb_param(ebb, types::I8);
f.dfg.append_block_param(block, types::I8);
assert_eq!(
f.to_string(),
"function %foo() fast {\n ss0 = explicit_slot 4\n\nebb0(v0: i8):\n}\n"
"function %foo() fast {\n ss0 = explicit_slot 4\n\nblock0(v0: i8):\n}\n"
);
f.dfg.append_ebb_param(ebb, types::F32.by(4).unwrap());
f.dfg.append_block_param(block, types::F32.by(4).unwrap());
assert_eq!(
f.to_string(),
"function %foo() fast {\n ss0 = explicit_slot 4\n\nebb0(v0: i8, v1: f32x4):\n}\n"
"function %foo() fast {\n ss0 = explicit_slot 4\n\nblock0(v0: i8, v1: f32x4):\n}\n"
);
{
let mut cursor = FuncCursor::new(&mut f);
cursor.set_position(CursorPosition::After(ebb));
cursor.set_position(CursorPosition::After(block));
cursor.ins().return_(&[])
};
assert_eq!(
f.to_string(),
"function %foo() fast {\n ss0 = explicit_slot 4\n\nebb0(v0: i8, v1: f32x4):\n return\n}\n"
"function %foo() fast {\n ss0 = explicit_slot 4\n\nblock0(v0: i8, v1: f32x4):\n return\n}\n"
);
}
@@ -811,18 +811,18 @@ mod tests {
let mut func = Function::new();
{
let ebb0 = func.dfg.make_ebb();
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
pos.insert_block(block0);
// make some detached values for change_to_alias
let v0 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
let v1 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
let v2 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
pos.func.dfg.detach_ebb_params(ebb0);
let v0 = pos.func.dfg.append_block_param(block0, types::I32);
let v1 = pos.func.dfg.append_block_param(block0, types::I32);
let v2 = pos.func.dfg.append_block_param(block0, types::I32);
pos.func.dfg.detach_block_params(block0);
// alias to a param--will be printed at beginning of ebb defining param
let v3 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
// alias to a param--will be printed at beginning of block defining param
let v3 = pos.func.dfg.append_block_param(block0, types::I32);
pos.func.dfg.change_to_alias(v0, v3);
// alias to an alias--should print attached to alias, not ultimate target
@@ -837,7 +837,7 @@ mod tests {
}
assert_eq!(
func.to_string(),
"function u0:0() fast {\nebb0(v3: i32):\n v0 -> v3\n v2 -> v0\n v4 = iconst.i32 42\n v5 = iadd v0, v0\n v1 -> v5\n v6 = iconst.i32 23\n v7 = iadd v1, v1\n}\n"
"function u0:0() fast {\nblock0(v3: i32):\n v0 -> v3\n v2 -> v0\n v4 = iconst.i32 42\n v5 = iadd v0, v0\n v1 -> v5\n v6 = iconst.i32 23\n v7 = iadd v1, v1\n}\n"
);
}
}


@@ -3,14 +3,14 @@ test verifier
function %gcd(i32 uext, i32 uext) -> i32 uext system_v {
fn0 = %divmod(i32 uext, i32 uext) -> i32 uext, i32 uext
ebb1(v0: i32, v1: i32):
brz v1, ebb3
jump ebb2
block1(v0: i32, v1: i32):
brz v1, block3
jump block2
ebb2:
block2:
v2, v3 = call fn0(v0, v1)
return v2
ebb3:
block3:
return v0
}


@@ -3,17 +3,17 @@ test verifier
function %average(i32, i32) -> f32 system_v {
ss0 = explicit_slot 8 ; Stack slot for ``sum``.
ebb1(v0: i32, v1: i32):
block1(v0: i32, v1: i32):
v2 = f64const 0x0.0
stack_store v2, ss0
brz v1, ebb5 ; Handle count == 0.
jump ebb2
brz v1, block5 ; Handle count == 0.
jump block2
ebb2:
block2:
v3 = iconst.i32 0
jump ebb3(v3)
jump block3(v3)
ebb3(v4: i32):
block3(v4: i32):
v5 = imul_imm v4, 4
v6 = iadd v0, v5
v7 = load.f32 v6 ; array[i]
@@ -23,17 +23,17 @@ ebb3(v4: i32):
stack_store v10, ss0
v11 = iadd_imm v4, 1
v12 = icmp ult v11, v1
brnz v12, ebb3(v11) ; Loop backedge.
jump ebb4
brnz v12, block3(v11) ; Loop backedge.
jump block4
ebb4:
block4:
v13 = stack_load.f64 ss0
v14 = fcvt_from_uint.f64 v1
v15 = fdiv v13, v14
v16 = fdemote.f32 v15
return v16
ebb5:
block5:
v100 = f32const +NaN
return v100
}


@@ -6,7 +6,7 @@ function %add_members(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv2 = load.i32 notrap aligned gv0+72
heap0 = dynamic gv1, min 0x1000, bound gv2, offset_guard 0
ebb0(v0: i32, v6: i64):
block0(v0: i32, v6: i64):
v1 = heap_addr.i64 heap0, v0, 20
v2 = load.f32 v1+16
v3 = heap_addr.i64 heap0, v0, 24


@@ -5,7 +5,7 @@ function %add_members(i32, i32 vmctx) -> f32 baldrdash_system_v {
gv1 = load.i32 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x10_0000, offset_guard 0x1000
ebb0(v0: i32, v5: i32):
block0(v0: i32, v5: i32):
v1 = heap_addr.i32 heap0, v0, 1
v2 = load.f32 v1+16
v3 = load.f32 v1+20


@@ -5,7 +5,7 @@ function %add_members(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv1 = load.i64 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v5: i64):
block0(v0: i32, v5: i64):
v1 = heap_addr.i64 heap0, v0, 1
v2 = load.f32 v1+16
v3 = load.f32 v1+20


@@ -104,7 +104,7 @@ macro_rules! entity_impl {
};
// Include basic `Display` impl using the given display prefix.
// Display an `Ebb` reference as "ebb12".
// Display a `Block` reference as "block12".
($entity:ident, $display_prefix:expr) => {
entity_impl!($entity);
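For readers unfamiliar with this macro, here is a minimal sketch of what the two-argument form provides, assuming the `cranelift-entity` crate's exported macro; the `main` wrapper is an illustrative assumption, not part of this diff:

use cranelift_entity::{entity_impl, EntityRef};

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Block(u32);
entity_impl!(Block, "block"); // one-arg form gives EntityRef; the prefix adds Display

fn main() {
    let b = Block::new(12);
    // The display prefix makes entity references print as "block12".
    assert_eq!(b.to_string(), "block12");
}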


@@ -216,7 +216,7 @@ mod tests {
#[test]
fn pop_unordered() {
let mut ebbs = [
let mut blocks = [
E(0),
E(1),
E(6),
@@ -231,14 +231,14 @@ mod tests {
];
let mut m = EntitySet::new();
for &ebb in &ebbs {
m.insert(ebb);
for &block in &blocks {
m.insert(block);
}
assert_eq!(m.len, 13);
ebbs.sort();
blocks.sort();
for &ebb in ebbs.iter().rev() {
assert_eq!(ebb, m.pop().unwrap());
for &block in blocks.iter().rev() {
assert_eq!(block, m.pop().unwrap());
}
assert!(m.is_empty());


@@ -387,7 +387,7 @@ struct FaerieRelocSink<'a> {
}
impl<'a> RelocSink for FaerieRelocSink<'a> {
fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) {
fn reloc_block(&mut self, _offset: CodeOffset, _reloc: Reloc, _block_offset: CodeOffset) {
unimplemented!();
}


@@ -5,33 +5,33 @@ test verifier
function %nonsense(i32, i32) -> f32 {
; regex: I=\binst\d+\b
; check: digraph "%nonsense" {
; check: ebb0 [shape=record, label="{ebb0(v1: i32, v2: i32):
; check: | <$(BRZ=$I)>brz v2, ebb2
; nextln: | <$(JUMP0=$I)>jump ebb3
; check: block0 [shape=record, label="{block0(v1: i32, v2: i32):
; check: | <$(BRZ=$I)>brz v2, block2
; nextln: | <$(JUMP0=$I)>jump block3
; nextln: }"]
; nextln: ebb3 [shape=record, label="{ebb3:
; check: | <$(JUMP3=$I)>jump ebb1(v4)
; nextln: block3 [shape=record, label="{block3:
; check: | <$(JUMP3=$I)>jump block1(v4)
; nextln: }"]
; nextln: ebb1 [shape=record, label="{ebb1(v5: i32):
; check: | <$(BRNZ1=$I)>brnz v13, ebb1(v12)
; nextln: | <$(JUMP1=$I)>jump ebb4
; nextln: block1 [shape=record, label="{block1(v5: i32):
; check: | <$(BRNZ1=$I)>brnz v13, block1(v12)
; nextln: | <$(JUMP1=$I)>jump block4
; nextln: }"]
; nextln: ebb4 [shape=record, label="{ebb4:
; nextln: block4 [shape=record, label="{block4:
; check: | <$I>return v17
; nextln: }"]
; nextln: ebb2 [shape=record, label="{ebb2:
; nextln: block2 [shape=record, label="{block2:
; check: | <$I>return v100
; check:}"]
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v3 = f64const 0x0.0
brz v2, ebb2 ; unordered: ebb0:$BRZ -> ebb2
jump ebb3 ; unordered: ebb0:$JUMP0 -> ebb3
brz v2, block2 ; unordered: block0:$BRZ -> block2
jump block3 ; unordered: block0:$JUMP0 -> block3
ebb3:
block3:
v4 = iconst.i32 0
jump ebb1(v4) ; unordered: ebb3:$JUMP3 -> ebb1
jump block1(v4) ; unordered: block3:$JUMP3 -> block1
ebb1(v5: i32):
block1(v5: i32):
v6 = imul_imm v5, 4
v7 = iadd v1, v6
v8 = f32const 0.0
@@ -40,17 +40,17 @@ ebb1(v5: i32):
v11 = fadd v9, v10
v12 = iadd_imm v5, 1
v13 = icmp ult v12, v2
brnz v13, ebb1(v12) ; unordered: ebb1:$BRNZ1 -> ebb1
jump ebb4 ; unordered: ebb1:$JUMP1 -> ebb4
brnz v13, block1(v12) ; unordered: block1:$BRNZ1 -> block1
jump block4 ; unordered: block1:$JUMP1 -> block4
ebb4:
block4:
v14 = f64const 0.0
v15 = f64const 0.0
v16 = fdiv v14, v15
v17 = f32const 0.0
return v17
ebb2:
block2:
v100 = f32const 0.0
return v100
}


@@ -6,16 +6,16 @@ test verifier
function %nonsense(i32) {
; check: digraph "%nonsense" {
ebb0(v1: i32):
block0(v1: i32):
trap user0 ; error: terminator instruction was encountered before the end
brnz v1, ebb2 ; unordered: ebb0:inst1 -> ebb2
jump ebb1 ; unordered: ebb0:inst2 -> ebb1
brnz v1, block2 ; unordered: block0:inst1 -> block2
jump block1 ; unordered: block0:inst2 -> block1
ebb1:
block1:
v2 = iconst.i32 0
v3 = iadd v1, v3
jump ebb0(v3) ; unordered: ebb1:inst5 -> ebb0
jump block0(v3) ; unordered: block1:inst5 -> block0
ebb2:
block2:
return v1
}


@@ -3,25 +3,25 @@ test print-cfg
function %not_reached(i32) -> i32 {
; check: digraph "%not_reached" {
; check: ebb0 [shape=record, label="{ebb0(v0: i32):
; check: | <inst0>brnz v0, ebb2
; check: block0 [shape=record, label="{block0(v0: i32):
; check: | <inst0>brnz v0, block2
; check: | <inst1>trap user0
; check: }"]
; check: ebb1 [shape=record, label="{ebb1:
; check: | <inst4>jump ebb0(v2)
; check: block1 [shape=record, label="{block1:
; check: | <inst4>jump block0(v2)
; check: }"]
; check: ebb2 [shape=record, label="{ebb2:
; check: block2 [shape=record, label="{block2:
; check: | <inst5>return v0
; check: }"]
ebb0(v0: i32):
brnz v0, ebb2 ; unordered: ebb0:inst0 -> ebb2
block0(v0: i32):
brnz v0, block2 ; unordered: block0:inst0 -> block2
trap user0
ebb1:
block1:
v1 = iconst.i32 1
v2 = iadd v0, v1
jump ebb0(v2) ; unordered: ebb1:inst4 -> ebb0
jump block0(v2) ; unordered: block1:inst4 -> block0
ebb2:
block2:
return v0
}


@@ -1,46 +1,46 @@
test dce
function %simple() -> i32 {
ebb0:
block0:
v2 = iconst.i32 2
v3 = iconst.i32 3
return v3
}
; sameln: function %simple
; nextln: ebb0:
; nextln: block0:
; nextln: v3 = iconst.i32 3
; nextln: return v3
; nextln: }
function %some_branching(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
v3 = iconst.i32 70
v4 = iconst.i32 71
v5 = iconst.i32 72
v8 = iconst.i32 73
brz v0, ebb1
jump ebb2(v8)
brz v0, block1
jump block2(v8)
ebb1:
block1:
v2 = iadd v0, v3
return v0
ebb2(v9: i32):
block2(v9: i32):
v6 = iadd v1, v4
v7 = iadd v6, v9
return v7
}
; sameln: function %some_branching
; nextln: ebb0(v0: i32, v1: i32):
; nextln: block0(v0: i32, v1: i32):
; nextln: v4 = iconst.i32 71
; nextln: v8 = iconst.i32 73
; nextln: brz v0, ebb1
; nextln: jump ebb2(v8)
; nextln: brz v0, block1
; nextln: jump block2(v8)
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: return v0
; nextln:
; nextln: ebb2(v9: i32):
; nextln: block2(v9: i32):
; nextln: v6 = iadd.i32 v1, v4
; nextln: v7 = iadd v6, v9
; nextln: return v7


@@ -1,25 +1,25 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb3 ; dominates: ebb3
jump ebb2 ; dominates: ebb2
ebb2:
jump ebb3
ebb3:
block0(v0: i32):
jump block1 ; dominates: block1
block1:
brz v0, block3 ; dominates: block3
jump block2 ; dominates: block2
block2:
jump block3
block3:
return
}
; check: cfg_postorder:
; sameln: ebb2
; sameln: ebb3
; sameln: ebb1
; sameln: ebb0
; sameln: block2
; sameln: block3
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1
; nextln: ebb1: ebb3 ebb2
; nextln: ebb3:
; nextln: ebb2:
; nextln: block0: block1
; nextln: block1: block3 block2
; nextln: block3:
; nextln: block2:
; nextln: }


@@ -1,118 +1,118 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb5
ebb5:
brz v0, ebb4
jump ebb6 ; dominates: ebb6
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block5
block5:
brz v0, block4
jump block6 ; dominates: block6
block6:
return
}
; Fall-through-first, prune-at-source DFT:
;
; ebb0 {
; ebb0:brz v0, ebb1 {
; ebb0:jump ebb2 {
; ebb2 {
; ebb2:brz v2, ebb2 -
; ebb2:brz v3, ebb1 -
; ebb2:brz v4, ebb4 {
; ebb2: jump ebb5 {
; ebb5: jump ebb6 {
; ebb6 {}
; block0 {
; block0:brz v0, block1 {
; block0:jump block2 {
; block2 {
; block2:brz v2, block2 -
; block2:brz v3, block1 -
; block2:brz v4, block4 {
; block2: jump block5 {
; block5: jump block6 {
; block6 {}
; }
; }
; ebb4 {}
; block4 {}
; }
; } ebb2
; } block2
; }
; ebb1 {
; ebb1:jump ebb3 {
; ebb3 {}
; block1 {
; block1:jump block3 {
; block3 {}
; }
; } ebb1
; } block1
; }
; } ebb0
; } block0
;
; check: cfg_postorder:
; sameln: ebb6
; sameln: ebb5
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block6
; sameln: block5
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4:
; nextln: ebb3:
; nextln: ebb5: ebb6
; nextln: ebb6:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4:
; nextln: block3:
; nextln: block5: block6
; nextln: block6:
; nextln: }
function %loop2(i32) system_v {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb8 ; dominates: ebb8
ebb8:
brnz v0, ebb5
jump ebb6 ; dominates: ebb6
ebb5:
brz v0, ebb4
jump ebb9 ; dominates: ebb9
ebb9:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block8 ; dominates: block8
block8:
brnz v0, block5
jump block6 ; dominates: block6
block5:
brz v0, block4
jump block9 ; dominates: block9
block9:
trap user0
ebb6:
jump ebb7 ; dominates: ebb7
ebb7:
block6:
jump block7 ; dominates: block7
block7:
return
}
; check: cfg_postorder:
; sameln: ebb9
; sameln: ebb5
; sameln: ebb7
; sameln: ebb6
; sameln: ebb8
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block9
; sameln: block5
; sameln: block7
; sameln: block6
; sameln: block8
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4: ebb8
; nextln: ebb8: ebb6
; nextln: ebb6: ebb7
; nextln: ebb7:
; nextln: ebb3:
; nextln: ebb5: ebb9
; nextln: ebb9:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4: block8
; nextln: block8: block6
; nextln: block6: block7
; nextln: block7:
; nextln: block3:
; nextln: block5: block9
; nextln: block9:
; nextln: }
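The "fall-through-first, prune-at-source DFT" annotated in the first function above is, in essence, a depth-first traversal that checks the visited set at the branch source and emits each block after its children. A standalone sketch under those assumptions (hypothetical types; each block's fall-through successor is listed first in its adjacency list):

fn cfg_postorder(entry: usize, succs: &[Vec<usize>]) -> Vec<usize> {
    fn dfs(block: usize, succs: &[Vec<usize>], seen: &mut [bool], post: &mut Vec<usize>) {
        seen[block] = true;
        for &s in &succs[block] {
            // Prune at the source: edges to already-seen blocks are not followed.
            if !seen[s] {
                dfs(s, succs, seen, post);
            }
        }
        // A block is appended only after everything reachable through it,
        // which yields exactly the `cfg_postorder` order the checks verify.
        post.push(block);
    }
    let mut seen = vec![false; succs.len()];
    let mut post = Vec::new();
    dfs(entry, succs, &mut seen, &mut post);
    post
}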


@@ -1,92 +1,92 @@
test domtree
function %loop1(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb6
jump ebb10 ; dominates: ebb10
ebb10:
brnz v0, ebb2 ; dominates: ebb2 ebb9
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb6
ebb2:
brz v0, ebb4 ; dominates: ebb4 ebb7 ebb8
jump ebb5 ; dominates: ebb5
ebb3:
jump ebb9
ebb4:
brz v0, ebb4
jump ebb11 ; dominates: ebb11
ebb11:
brnz v0, ebb6
jump ebb7
ebb5:
brz v0, ebb7
jump ebb12 ; dominates: ebb12
ebb12:
brnz v0, ebb8
jump ebb9
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block6
jump block10 ; dominates: block10
block10:
brnz v0, block2 ; dominates: block2 block9
jump block3 ; dominates: block3
block1:
jump block6
block2:
brz v0, block4 ; dominates: block4 block7 block8
jump block5 ; dominates: block5
block3:
jump block9
block4:
brz v0, block4
jump block11 ; dominates: block11
block11:
brnz v0, block6
jump block7
block5:
brz v0, block7
jump block12 ; dominates: block12
block12:
brnz v0, block8
jump block9
block6:
return
ebb7:
jump ebb8
ebb8:
block7:
jump block8
block8:
return
ebb9:
block9:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb10 ebb6
; nextln: ebb1:
; nextln: ebb10: ebb2 ebb3 ebb9
; nextln: ebb2: ebb4 ebb5 ebb7 ebb8
; nextln: ebb4: ebb11
; nextln: ebb11:
; nextln: ebb5: ebb12
; nextln: ebb12:
; nextln: ebb7:
; nextln: ebb8:
; nextln: ebb3:
; nextln: ebb9:
; nextln: ebb6:
; nextln: block0: block1 block10 block6
; nextln: block1:
; nextln: block10: block2 block3 block9
; nextln: block2: block4 block5 block7 block8
; nextln: block4: block11
; nextln: block11:
; nextln: block5: block12
; nextln: block12:
; nextln: block7:
; nextln: block8:
; nextln: block3:
; nextln: block9:
; nextln: block6:
; nextln: }
function %loop2(i32) system_v {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb5
ebb5:
brz v0, ebb4
jump ebb6 ; dominates: ebb6
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block5
block5:
brz v0, block4
jump block6 ; dominates: block6
block6:
return
}
; check: cfg_postorder:
; sameln: ebb6
; sameln: ebb5
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block6
; sameln: block5
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4:
; nextln: ebb3:
; nextln: ebb5: ebb6
; nextln: ebb6:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4:
; nextln: block3:
; nextln: block5: block6
; nextln: block6:
; nextln: }


@@ -1,54 +1,54 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1
jump ebb12 ; dominates: ebb12
ebb12:
brnz v0, ebb2 ; dominates: ebb2 ebb5
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb4 ; dominates: ebb4
ebb2:
jump ebb5
ebb3:
jump ebb5
ebb4:
brz v0, ebb6 ; dominates: ebb6 ebb10
jump ebb7 ; dominates: ebb7
ebb5:
block0(v0: i32):
brz v0, block1 ; dominates: block1
jump block12 ; dominates: block12
block12:
brnz v0, block2 ; dominates: block2 block5
jump block3 ; dominates: block3
block1:
jump block4 ; dominates: block4
block2:
jump block5
block3:
jump block5
block4:
brz v0, block6 ; dominates: block6 block10
jump block7 ; dominates: block7
block5:
return
ebb6:
brz v0, ebb8 ; dominates: ebb11 ebb8
jump ebb13 ; dominates: ebb13
ebb13:
brnz v0, ebb9 ; dominates: ebb9
jump ebb10
ebb7:
jump ebb10
ebb8:
jump ebb11
ebb9:
jump ebb11
ebb10:
block6:
brz v0, block8 ; dominates: block11 block8
jump block13 ; dominates: block13
block13:
brnz v0, block9 ; dominates: block9
jump block10
block7:
jump block10
block8:
jump block11
block9:
jump block11
block10:
return
ebb11:
block11:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb12
; nextln: ebb1: ebb4
; nextln: ebb4: ebb6 ebb7 ebb10
; nextln: ebb6: ebb8 ebb13 ebb11
; nextln: ebb8:
; nextln: ebb13: ebb9
; nextln: ebb9:
; nextln: ebb11:
; nextln: ebb7:
; nextln: ebb10:
; nextln: ebb12: ebb2 ebb3 ebb5
; nextln: ebb2:
; nextln: ebb3:
; nextln: ebb5:
; nextln: block0: block1 block12
; nextln: block1: block4
; nextln: block4: block6 block7 block10
; nextln: block6: block8 block13 block11
; nextln: block8:
; nextln: block13: block9
; nextln: block9:
; nextln: block11:
; nextln: block7:
; nextln: block10:
; nextln: block12: block2 block3 block5
; nextln: block2:
; nextln: block3:
; nextln: block5:
; nextln: }


@@ -1,73 +1,73 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb13 ; dominates: ebb13
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb2 ; dominates: ebb2 ebb7
jump ebb20 ; dominates: ebb20
ebb20:
brnz v0, ebb3 ; dominates: ebb3
jump ebb21 ; dominates: ebb21
ebb21:
brz v0, ebb4 ; dominates: ebb4
jump ebb22 ; dominates: ebb22
ebb22:
brnz v0, ebb5 ; dominates: ebb5
jump ebb6 ; dominates: ebb6
ebb2:
jump ebb7
ebb3:
jump ebb7
ebb4:
jump ebb7
ebb5:
jump ebb7
ebb6:
jump ebb7
ebb7:
brnz v0, ebb8 ; dominates: ebb8 ebb12
jump ebb23 ; dominates: ebb23
ebb23:
brz v0, ebb9 ; dominates: ebb9
jump ebb24 ; dominates: ebb24
ebb24:
brnz v0, ebb10 ; dominates: ebb10
jump ebb11 ; dominates: ebb11
ebb8:
jump ebb12
ebb9:
jump ebb12
ebb10:
brz v0, ebb13
jump ebb12
ebb11:
jump ebb13
ebb12:
block0(v0: i32):
brz v0, block13 ; dominates: block13
jump block1 ; dominates: block1
block1:
brz v0, block2 ; dominates: block2 block7
jump block20 ; dominates: block20
block20:
brnz v0, block3 ; dominates: block3
jump block21 ; dominates: block21
block21:
brz v0, block4 ; dominates: block4
jump block22 ; dominates: block22
block22:
brnz v0, block5 ; dominates: block5
jump block6 ; dominates: block6
block2:
jump block7
block3:
jump block7
block4:
jump block7
block5:
jump block7
block6:
jump block7
block7:
brnz v0, block8 ; dominates: block8 block12
jump block23 ; dominates: block23
block23:
brz v0, block9 ; dominates: block9
jump block24 ; dominates: block24
block24:
brnz v0, block10 ; dominates: block10
jump block11 ; dominates: block11
block8:
jump block12
block9:
jump block12
block10:
brz v0, block13
jump block12
block11:
jump block13
block12:
return
ebb13:
block13:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb13 ebb1
; nextln: ebb13:
; nextln: ebb1: ebb2 ebb20 ebb7
; nextln: ebb2:
; nextln: ebb20: ebb3 ebb21
; nextln: ebb3:
; nextln: ebb21: ebb4 ebb22
; nextln: ebb4:
; nextln: ebb22: ebb5 ebb6
; nextln: ebb5:
; nextln: ebb6:
; nextln: ebb7: ebb8 ebb23 ebb12
; nextln: ebb8:
; nextln: ebb23: ebb9 ebb24
; nextln: ebb9:
; nextln: ebb24: ebb10 ebb11
; nextln: ebb10:
; nextln: ebb11:
; nextln: ebb12:
; nextln: block0: block13 block1
; nextln: block13:
; nextln: block1: block2 block20 block7
; nextln: block2:
; nextln: block20: block3 block21
; nextln: block3:
; nextln: block21: block4 block22
; nextln: block4:
; nextln: block22: block5 block6
; nextln: block5:
; nextln: block6:
; nextln: block7: block8 block23 block12
; nextln: block8:
; nextln: block23: block9 block24
; nextln: block9:
; nextln: block24: block10 block11
; nextln: block10:
; nextln: block11:
; nextln: block12:
; nextln: }


@@ -9,6 +9,6 @@ function %f() {
; available in RV32E.
sig0 = (i64, i64, i64, i64) -> i64 system_v
; check: sig0 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11] system_v
ebb0:
block0:
return
}

View File

@@ -27,6 +27,6 @@ function %f() {
sig5 = (i64x4) system_v
; check: sig5 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17]) system_v
ebb0:
block0:
return
}


@@ -6,7 +6,7 @@ function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
sig0 = ()
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x21] v2 = iconst.i32 2
@@ -94,96 +94,96 @@ ebb0(v9999: i32):
call_indirect sig0, v1() ; bin: 000500e7
call_indirect sig0, v2() ; bin: 000a80e7
brz v1, ebb3
fallthrough ebb4
brz v1, block3
fallthrough block4
ebb4:
brnz v1, ebb1
fallthrough ebb5
block4:
brnz v1, block1
fallthrough block5
ebb5:
block5:
; jalr %x0, %x1, 0
return v9999 ; bin: 00008067
ebb1:
block1:
; beq 0x000
br_icmp eq v1, v2, ebb1 ; bin: 01550063
fallthrough ebb100
br_icmp eq v1, v2, block1 ; bin: 01550063
fallthrough block100
ebb100:
block100:
; bne 0xffc
br_icmp ne v1, v2, ebb1 ; bin: ff551ee3
fallthrough ebb101
br_icmp ne v1, v2, block1 ; bin: ff551ee3
fallthrough block101
ebb101:
block101:
; blt 0xff8
br_icmp slt v1, v2, ebb1 ; bin: ff554ce3
fallthrough ebb102
br_icmp slt v1, v2, block1 ; bin: ff554ce3
fallthrough block102
ebb102:
block102:
; bge 0xff4
br_icmp sge v1, v2, ebb1 ; bin: ff555ae3
fallthrough ebb103
br_icmp sge v1, v2, block1 ; bin: ff555ae3
fallthrough block103
ebb103:
block103:
; bltu 0xff0
br_icmp ult v1, v2, ebb1 ; bin: ff5568e3
fallthrough ebb104
br_icmp ult v1, v2, block1 ; bin: ff5568e3
fallthrough block104
ebb104:
block104:
; bgeu 0xfec
br_icmp uge v1, v2, ebb1 ; bin: ff5576e3
fallthrough ebb105
br_icmp uge v1, v2, block1 ; bin: ff5576e3
fallthrough block105
ebb105:
block105:
; Forward branches.
fallthrough ebb106
fallthrough block106
ebb106:
block106:
; beq 0x018
br_icmp eq v2, v1, ebb2 ; bin: 00aa8c63
fallthrough ebb107
br_icmp eq v2, v1, block2 ; bin: 00aa8c63
fallthrough block107
ebb107:
block107:
; bne 0x014
br_icmp ne v2, v1, ebb2 ; bin: 00aa9a63
fallthrough ebb108
br_icmp ne v2, v1, block2 ; bin: 00aa9a63
fallthrough block108
ebb108:
block108:
; blt 0x010
br_icmp slt v2, v1, ebb2 ; bin: 00aac863
fallthrough ebb109
br_icmp slt v2, v1, block2 ; bin: 00aac863
fallthrough block109
ebb109:
block109:
; bge 0x00c
br_icmp sge v2, v1, ebb2 ; bin: 00aad663
fallthrough ebb110
br_icmp sge v2, v1, block2 ; bin: 00aad663
fallthrough block110
ebb110:
block110:
; bltu 0x008
br_icmp ult v2, v1, ebb2 ; bin: 00aae463
fallthrough ebb111
br_icmp ult v2, v1, block2 ; bin: 00aae463
fallthrough block111
ebb111:
block111:
; bgeu 0x004
br_icmp uge v2, v1, ebb2 ; bin: 00aaf263
br_icmp uge v2, v1, block2 ; bin: 00aaf263
fallthrough ebb2
fallthrough block2
ebb2:
block2:
; jal %x0, 0x00000
jump ebb2 ; bin: 0000006f
jump block2 ; bin: 0000006f
ebb3:
block3:
; beq x, %x0
brz v1, ebb3 ; bin: 00050063
fallthrough ebb6
brz v1, block3 ; bin: 00050063
fallthrough block6
ebb6:
block6:
; bne x, %x0
brnz v1, ebb3 ; bin: fe051ee3
brnz v1, block3 ; bin: fe051ee3
; jal %x0, 0x1ffff4
jump ebb2 ; bin: ff5ff06f
jump block2 ; bin: ff5ff06f
}
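The offsets in the branch comments above (0x000, then 0xffc, 0xff8, ... for the backward branches, and 0x018 down to 0x004 for the forward ones) are the byte distance to the target written as a 12-bit two's-complement value. A hedged sketch of that arithmetic (names are illustrative, not from the assembler):

fn comment_offset(branch_addr: i64, target_addr: i64) -> u16 {
    let delta = target_addr - branch_addr; // bytes; negative when branching backward
    (delta & 0xfff) as u16 // two's complement in 12 bits: -4 becomes 0xffc
}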

View File

@@ -2,7 +2,7 @@ test legalizer
target riscv32 supports_m=1
function %int32(i32, i32) {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v10 = iadd v1, v2
; check: [R#0c]
; sameln: v10 = iadd


@@ -8,7 +8,7 @@ target riscv64 supports_m=1
; regex: V=v\d+
function %carry_out(i32, i32) -> i32, b1 {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v3, v4 = iadd_cout v1, v2
return v3, v4
}
@@ -19,7 +19,7 @@ ebb0(v1: i32, v2: i32):
; Expanding illegal immediate constants.
; Note that at some point we'll probably expand the iconst as well.
function %large_imm(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
v1 = iadd_imm v0, 1000000000
return v1
}
@@ -28,7 +28,7 @@ ebb0(v0: i32):
; check: return v1
function %bitclear(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
v2 = band_not v0, v1
; check: iconst.i32 -1
; check: bxor


@@ -7,8 +7,8 @@ target riscv32
; regex: WS=\s+
function %int_split_args(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
block0(v0: i64):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: v0 = iconcat $v0l, $v0h
v1 = iadd_imm v0, 1
; check: $(v1l=$V), $(v1h=$V) = isplit v1
@@ -19,7 +19,7 @@ ebb0(v0: i64):
function %split_call_arg(i32) {
fn1 = %foo(i64)
fn2 = %foo(i32, i64)
ebb0(v0: i32):
block0(v0: i32):
v1 = uextend.i64 v0
call fn1(v1)
; check: $(v1l=$V), $(v1h=$V) = isplit v1
@@ -31,36 +31,36 @@ ebb0(v0: i32):
function %split_ret_val() {
fn1 = %foo() -> i64
ebb0:
block0:
v1 = call fn1()
; check: ebb0($(link=$V): i32):
; check: block0($(link=$V): i32):
; nextln: $(v1l=$V), $(v1h=$V) = call fn1()
; check: v1 = iconcat $v1l, $v1h
jump ebb1(v1)
; check: jump ebb1(v1)
jump block1(v1)
; check: jump block1(v1)
ebb1(v10: i64):
jump ebb1(v10)
block1(v10: i64):
jump block1(v10)
}
; First return value is fine, second one is expanded.
function %split_ret_val2() {
fn1 = %foo() -> i32, i64
ebb0:
block0:
v1, v2 = call fn1()
; check: ebb0($(link=$V): i32):
; check: block0($(link=$V): i32):
; nextln: v1, $(v2l=$V), $(v2h=$V) = call fn1()
; check: v2 = iconcat $v2l, $v2h
jump ebb1(v1, v2)
; check: jump ebb1(v1, v2)
jump block1(v1, v2)
; check: jump block1(v1, v2)
ebb1(v9: i32, v10: i64):
jump ebb1(v9, v10)
block1(v9: i32, v10: i64):
jump block1(v9, v10)
}
function %int_ext(i8, i8 sext, i8 uext) -> i8 uext {
ebb0(v1: i8, v2: i8, v3: i8):
; check: ebb0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
block0(v1: i8, v2: i8, v3: i8):
; check: block0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
; check: v2 = ireduce.i8 $v2x
; check: v3 = ireduce.i8 $v3x
; check: $(v1x=$V) = uextend.i32 v1
@@ -71,21 +71,21 @@ ebb0(v1: i8, v2: i8, v3: i8):
; Function produces single return value, still need to copy.
function %ext_ret_val() {
fn1 = %foo() -> i8 sext
ebb0:
block0:
v1 = call fn1()
; check: ebb0($V: i32):
; check: block0($V: i32):
; nextln: $(rv=$V) = call fn1()
; check: v1 = ireduce.i8 $rv
jump ebb1(v1)
; check: jump ebb1(v1)
jump block1(v1)
; check: jump block1(v1)
ebb1(v10: i8):
jump ebb1(v10)
block1(v10: i8):
jump block1(v10)
}
function %vector_split_args(i64x4) -> i64x4 {
ebb0(v0: i64x4):
; check: ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
block0(v0: i64x4):
; check: block0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
; check: $(v0a=$V) = iconcat $v0al, $v0ah
; check: $(v0b=$V) = iconcat $v0bl, $v0bh
; check: $(v0ab=$V) = vconcat $v0a, $v0b
@@ -107,7 +107,7 @@ ebb0(v0: i64x4):
function %indirect(i32) {
sig1 = () system_v
ebb0(v0: i32):
block0(v0: i32):
call_indirect sig1, v0()
return
}
@@ -115,7 +115,7 @@ ebb0(v0: i32):
; The first argument to call_indirect doesn't get altered.
function %indirect_arg(i32, f32x2) {
sig1 = (f32x2) system_v
ebb0(v0: i32, v1: f32x2):
block0(v0: i32, v1: f32x2):
call_indirect sig1, v0(v1)
; check: call_indirect sig1, v0($V, $V)
return
@@ -125,7 +125,7 @@ ebb0(v0: i32, v1: f32x2):
function %stack_args(i32) {
; check: $(ss0=$SS) = outgoing_arg 4
fn1 = %foo(i64, i64, i64, i64, i32)
ebb0(v0: i32):
block0(v0: i32):
v1 = iconst.i64 1
call fn1(v1, v1, v1, v1, v0)
; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill v0


@@ -5,11 +5,11 @@ target riscv32 supports_m=1
; regex: V=v\d+
function %bitwise_and(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = band v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#ec
; sameln: $(v3l=$V) = band $v1l, $v2l
; check: [R#ec
@@ -18,11 +18,11 @@ ebb0(v1: i64, v2: i64):
; check: return $v3l, $v3h, $link
function %bitwise_or(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = bor v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#cc
; sameln: $(v3l=$V) = bor $v1l, $v2l
; check: [R#cc
@@ -31,11 +31,11 @@ ebb0(v1: i64, v2: i64):
; check: return $v3l, $v3h, $link
function %bitwise_xor(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = bxor v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#8c
; sameln: $(v3l=$V) = bxor $v1l, $v2l
; check: [R#8c
@@ -47,11 +47,11 @@ function %arith_add(i64, i64) -> i64 {
; Legalizing iadd.i64 requires two steps:
; 1. Narrow to iadd_cout.i32, then
; 2. Expand iadd_cout.i32 since RISC-V has no carry flag.
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = iadd v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#0c
; sameln: $(v3l=$V) = iadd $v1l, $v2l
; check: $(c=$V) = icmp ult $v3l, $v1l
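The `icmp ult` in the final check is the standard carry-recovery trick: an unsigned add wraps exactly when the result is less than either operand. A plain-Rust sketch of the arithmetic the fully legalized code performs (function and variable names are illustrative):

fn add64(xl: u32, xh: u32, yl: u32, yh: u32) -> (u32, u32) {
    let low = xl.wrapping_add(yl);
    let carry = (low < xl) as u32; // the `icmp ult $v3l, $v1l` above
    let high = xh.wrapping_add(yh).wrapping_add(carry);
    (low, high) // ($v3l, $v3h)
}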


@@ -4,11 +4,11 @@ target riscv32
; regex: V=v\d+
function %icmp_imm_eq(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm eq v0, 0x20202020_10101010
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)
@@ -20,11 +20,11 @@ ebb0(v0: i64):
; nextln: return v1, $(link)
function %icmp_imm_ne(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm ne v0, 0x33333333_44444444
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)
@@ -36,11 +36,11 @@ ebb0(v0: i64):
; nextln: return v1, $(link)
function %icmp_imm_sge(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm sge v0, 0x01020304_05060708
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)


@@ -31,6 +31,6 @@ function %parse_encoding(i32 [%x5]) -> i32 [%x10] {
; check: sig6 = (i32 [%x10]) -> b1 [%x10] system_v
; nextln: fn0 = %bar sig6
ebb0(v0: i32):
block0(v0: i32):
return v0
}


@@ -3,7 +3,7 @@ test binemit
target riscv32
function %regmoves(i32 link [%x1]) -> i32 link [%x1] {
ebb0(v9999: i32):
block0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x7] v2 = iadd_imm v1, 1000 ; bin: 3e850393
regmove v1, %x10 -> %x11 ; bin: 00050593


@@ -1,17 +1,17 @@
; Test the legalization of EBB arguments that are split.
; Test the legalization of block arguments that are split.
test legalizer
target riscv32
; regex: V=v\d+
function %simple(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump ebb1($v1l, $v1h)
block0(v1: i64, v2: i64):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump block1(v1)
; check: jump block1($v1l, $v1h)
ebb1(v3: i64):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
block1(v3: i64):
; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
@@ -20,18 +20,18 @@ ebb1(v3: i64):
}
function %multi(i64) -> i64 {
ebb1(v1: i64):
; check: ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump ebb2(v1, v1)
; check: jump ebb2($v1l, $v1l, $v1h, $v1h)
block1(v1: i64):
; check: block1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump block2(v1, v1)
; check: jump block2($v1l, $v1l, $v1h, $v1h)
ebb2(v2: i64, v3: i64):
; check: ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump ebb3(v2)
; check: jump ebb3($v2l, $v2h)
block2(v2: i64, v3: i64):
; check: block2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump block3(v2)
; check: jump block3($v2l, $v2h)
ebb3(v4: i64):
; check: ebb3($(v4l=$V): i32, $(v4h=$V): i32):
block3(v4: i64):
; check: block3($(v4l=$V): i32, $(v4h=$V): i32):
v5 = band v4, v3
; check: $(v5l=$V) = band $v4l, $v3l
; check: $(v5h=$V) = band $v4h, $v3h
@@ -40,16 +40,16 @@ ebb3(v4: i64):
}
function %loop(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump ebb1($v1l, $v1h)
block0(v1: i64, v2: i64):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump block1(v1)
; check: jump block1($v1l, $v1h)
ebb1(v3: i64):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
block1(v3: i64):
; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
jump ebb1(v4)
; check: jump ebb1($v4l, $v4h)
jump block1(v4)
; check: jump block1($v4l, $v4h)
}


@@ -4,7 +4,7 @@ target riscv32
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
; iconst.i32 needs legalizing, so it should throw a
[R#0,-] v1 = iconst.i32 0xf0f0f0f0f0 ; error: Instruction failed to re-encode
[Iret#19] return v9999
@@ -13,7 +13,7 @@ ebb0(v9999: i32):
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
[R#0,-] v3 = iadd v1, v2 ; error: encoding R#00 should be R#0c


@@ -5,7 +5,7 @@ target i686
; allocator can move it to a register that can be.
function %test(i32 [%rdi]) -> i32 system_v {
ebb0(v0: i32 [%rdi]):
block0(v0: i32 [%rdi]):
v1 = ireduce.i8 v0
v2 = sextend.i32 v1
return v2


@@ -2,18 +2,18 @@ test compile
target x86_64 haswell
function %foo(i64, i64, i64, i32) -> b1 system_v {
ebb3(v0: i64, v1: i64, v2: i64, v3: i32):
block3(v0: i64, v1: i64, v2: i64, v3: i32):
v5 = icmp ne v2, v2
v8 = iconst.i64 0
jump ebb2(v8, v3, v5)
jump block2(v8, v3, v5)
ebb2(v10: i64, v30: i32, v37: b1):
block2(v10: i64, v30: i32, v37: b1):
v18 = load.i32 notrap aligned v2
v27 = iadd.i64 v10, v10
v31 = icmp eq v30, v30
brz v31, ebb2(v27, v30, v37)
jump ebb0(v37)
brz v31, block2(v27, v30, v37)
jump block0(v37)
ebb0(v35: b1):
block0(v35: b1):
return v35
}

Some files were not shown because too many files have changed in this diff.