Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) used to represent the basic-block
subcomponent of one Ebb that acts as a predecessor of another Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated any comments that refer to BasicBlock, as
eventually we will remove BlockPredecessor and replace it with Block,
which is a true basic block, so those comments will become correct
on their own.
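
For reference, a sketch of the pair under its new name as it stands at
this point in the series (field names are assumed from context, and the
entity type is still called `Ebb` until a later commit; see flowgraph.rs
for the real definition):

```rust
use crate::ir::{Ebb, Inst};

/// A basic-block subcomponent of an Ebb that is a predecessor of
/// another Ebb: the Ebb it lives in, plus the branch instruction
/// that ends it and transfers control to the successor.
pub struct BlockPredecessor {
    /// The Ebb containing the predecessor basic block.
    pub ebb: Ebb,
    /// The branch instruction that ends the predecessor basic block.
    pub inst: Inst,
}
```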

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
their associated identifiers, would conflict with the names introduced
in a later commit, so they are given more verbose names here.
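
Concretely, the collision looks roughly like this; the renamed
identifiers below are hypothetical stand-ins for illustration, not
necessarily the names the commit chose (see ssa.rs for the actual
definitions):

```rust
/// SSABuilder's private block entity (previously named `Block`, which
/// would collide once `Ebb` is renamed to `Block` crate-wide).
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct SSABlock(u32);

/// Per-block state used during SSA construction (previously
/// `BlockData`). The field shown is hypothetical.
pub struct SSABlockData {
    /// Whether every predecessor of this block is already known.
    pub sealed: bool,
}
```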

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs
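
The renames above are mechanical substring substitutions. A minimal
Rust sketch of the per-file transform (the commit was more likely
scripted with sed or similar; `rename_in_file` is illustrative only):

```rust
use std::{fs, io, path::Path};

/// Apply the three case variants of the rename to a single file.
/// Plain substring replacement, not word-boundary matching, is what
/// later forces a clif constant containing "ebb" to be renamed by hand.
fn rename_in_file(path: &Path) -> io::Result<()> {
    let src = fs::read_to_string(path)?;
    let out = src
        .replace("Ebb", "Block") // type and variant names
        .replace("EBB", "block") // comments and prose
        .replace("ebb", "block"); // lower-case identifiers
    fs::write(path, out)
}
```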

* Manually update comment for `Block`

The Wikipedia link in `Block`'s doc comment required an update, since
the type now describes a plain basic block rather than an extended one.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters, so the new, longer
name was truncated and no longer matched a filecheck expectation. An
outdated comment was also fixed.
Ryan Hunt authored on 2020-02-07 10:46:47 -06:00, committed by GitHub
parent a136d1cb00, commit 832666c45e
370 changed files with 8090 additions and 7988 deletions

View File

@@ -74,8 +74,8 @@ impl<'a> MemoryCodeSink<'a> {
 /// A trait for receiving relocations for code that is emitted directly into memory.
 pub trait RelocSink {
-    /// Add a relocation referencing an EBB at the current offset.
-    fn reloc_ebb(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
+    /// Add a relocation referencing an block at the current offset.
+    fn reloc_block(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
     /// Add a relocation referencing an external symbol at the current offset.
     fn reloc_external(&mut self, _: CodeOffset, _: Reloc, _: &ExternalName, _: Addend);
@@ -127,9 +127,9 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
         self.write(x);
     }

-    fn reloc_ebb(&mut self, rel: Reloc, ebb_offset: CodeOffset) {
+    fn reloc_block(&mut self, rel: Reloc, block_offset: CodeOffset) {
         let ofs = self.offset();
-        self.relocs.reloc_ebb(ofs, rel, ebb_offset);
+        self.relocs.reloc_block(ofs, rel, block_offset);
     }

     fn reloc_external(&mut self, rel: Reloc, name: &ExternalName, addend: Addend) {
@@ -177,7 +177,7 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
 pub struct NullRelocSink {}

 impl RelocSink for NullRelocSink {
-    fn reloc_ebb(&mut self, _: u32, _: Reloc, _: u32) {}
+    fn reloc_block(&mut self, _: u32, _: Reloc, _: u32) {}
     fn reloc_external(&mut self, _: u32, _: Reloc, _: &ExternalName, _: i64) {}
     fn reloc_constant(&mut self, _: CodeOffset, _: Reloc, _: ConstantOffset) {}
     fn reloc_jt(&mut self, _: u32, _: Reloc, _: JumpTable) {}

View File

@@ -127,8 +127,8 @@ pub trait CodeSink {
     /// Add 8 bytes to the code section.
     fn put8(&mut self, _: u64);

-    /// Add a relocation referencing an EBB at the current offset.
-    fn reloc_ebb(&mut self, _: Reloc, _: CodeOffset);
+    /// Add a relocation referencing an block at the current offset.
+    fn reloc_block(&mut self, _: Reloc, _: CodeOffset);

     /// Add a relocation referencing an external symbol plus the addend at the current offset.
     fn reloc_external(&mut self, _: Reloc, _: &ExternalName, _: Addend);
@@ -205,10 +205,10 @@ where
     EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS, &dyn TargetIsa),
 {
     let mut divert = RegDiversions::new();
-    for ebb in func.layout.ebbs() {
-        divert.at_ebb(&func.entry_diversions, ebb);
-        debug_assert_eq!(func.offsets[ebb], sink.offset());
-        for inst in func.layout.ebb_insts(ebb) {
+    for block in func.layout.blocks() {
+        divert.at_block(&func.entry_diversions, block);
+        debug_assert_eq!(func.offsets[block], sink.offset());
+        for inst in func.layout.block_insts(block) {
             emit_inst(func, inst, &mut divert, sink, isa);
         }
     }
@@ -218,8 +218,8 @@ where
     // Output jump tables.
     for (jt, jt_data) in func.jump_tables.iter() {
         let jt_offset = func.jt_offsets[jt];
-        for ebb in jt_data.iter() {
-            let rel_offset: i32 = func.offsets[*ebb] as i32 - jt_offset as i32;
+        for block in jt_data.iter() {
+            let rel_offset: i32 = func.offsets[*block] as i32 - jt_offset as i32;
             sink.put4(rel_offset as u32)
         }
     }

View File

@@ -1,9 +1,9 @@
 //! Branch relaxation and offset computation.
 //!
-//! # EBB header offsets
+//! # block header offsets
 //!
 //! Before we can generate binary machine code for branch instructions, we need to know the final
-//! offsets of all the EBB headers in the function. This information is encoded in the
+//! offsets of all the block headers in the function. This information is encoded in the
 //! `func.offsets` table.
 //!
 //! # Branch relaxation
@@ -16,22 +16,22 @@
 //! unconditional branches:
 //!
 //! ```clif
-//!     brz v1, ebb17
+//!     brz v1, block17
 //! ```
 //!
 //! can be transformed into:
 //!
 //! ```clif
-//!     brnz v1, ebb23
-//!     jump ebb17
-//! ebb23:
+//!     brnz v1, block23
+//!     jump block17
+//! block23:
 //! ```

 use crate::binemit::{CodeInfo, CodeOffset};
 use crate::cursor::{Cursor, FuncCursor};
 use crate::dominator_tree::DominatorTree;
 use crate::flowgraph::ControlFlowGraph;
-use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueList};
+use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueList};
 use crate::isa::{EncInfo, TargetIsa};
 use crate::iterators::IteratorExtras;
 use crate::regalloc::RegDiversions;
@@ -40,7 +40,7 @@ use crate::CodegenResult;
 use core::convert::TryFrom;
 use log::debug;

-/// Relax branches and compute the final layout of EBB headers in `func`.
+/// Relax branches and compute the final layout of block headers in `func`.
 ///
 /// Fill in the `func.offsets` table so the function is ready for binary emission.
 pub fn relax_branches(
@@ -53,9 +53,9 @@ pub fn relax_branches(
     let encinfo = isa.encoding_info();

-    // Clear all offsets so we can recognize EBBs that haven't been visited yet.
+    // Clear all offsets so we can recognize blocks that haven't been visited yet.
     func.offsets.clear();
-    func.offsets.resize(func.dfg.num_ebbs());
+    func.offsets.resize(func.dfg.num_blocks());

     // Start by removing redundant jumps.
     fold_redundant_jumps(func, _cfg, _domtree);
@@ -66,12 +66,12 @@ pub fn relax_branches(
     let mut offset = 0;
     let mut divert = RegDiversions::new();

-    // First, compute initial offsets for every EBB.
+    // First, compute initial offsets for every block.
     {
         let mut cur = FuncCursor::new(func);
-        while let Some(ebb) = cur.next_ebb() {
-            divert.at_ebb(&cur.func.entry_diversions, ebb);
-            cur.func.offsets[ebb] = offset;
+        while let Some(block) = cur.next_block() {
+            divert.at_block(&cur.func.entry_diversions, block);
+            cur.func.offsets[block] = offset;
             while let Some(inst) = cur.next_inst() {
                 divert.apply(&cur.func.dfg[inst]);
                 let enc = cur.func.encodings[inst];
@@ -88,12 +88,12 @@ pub fn relax_branches(
         // Visit all instructions in layout order.
         let mut cur = FuncCursor::new(func);
-        while let Some(ebb) = cur.next_ebb() {
-            divert.at_ebb(&cur.func.entry_diversions, ebb);
+        while let Some(block) = cur.next_block() {
+            divert.at_block(&cur.func.entry_diversions, block);

-            // Record the offset for `ebb` and make sure we iterate until offsets are stable.
-            if cur.func.offsets[ebb] != offset {
-                cur.func.offsets[ebb] = offset;
+            // Record the offset for `block` and make sure we iterate until offsets are stable.
+            if cur.func.offsets[block] != offset {
+                cur.func.offsets[block] = offset;
                 go_again = true;
             }
@@ -153,21 +153,21 @@ pub fn relax_branches(
 fn try_fold_redundant_jump(
     func: &mut Function,
     cfg: &mut ControlFlowGraph,
-    ebb: Ebb,
+    block: Block,
     first_inst: Inst,
 ) -> bool {
     let first_dest = match func.dfg[first_inst].branch_destination() {
-        Some(ebb) => ebb, // The instruction was a single-target branch.
+        Some(block) => block, // The instruction was a single-target branch.
         None => {
             return false; // The instruction was either multi-target or not a branch.
         }
     };

-    // For the moment, only attempt to fold a branch to an ebb that is parameterless.
+    // For the moment, only attempt to fold a branch to an block that is parameterless.
     // These blocks are mainly produced by critical edge splitting.
     //
     // TODO: Allow folding blocks that define SSA values and function as phi nodes.
-    if func.dfg.num_ebb_params(first_dest) != 0 {
+    if func.dfg.num_block_params(first_dest) != 0 {
         return false;
     }
@@ -178,7 +178,7 @@ fn try_fold_redundant_jump(
         return false;
     }

-    // Now we need to fix up first_inst's ebb parameters to match second_inst's,
+    // Now we need to fix up first_inst's block parameters to match second_inst's,
     // without changing the branch-specific arguments.
     //
     // The intermediary block is allowed to reference any SSA value that dominates it,
@@ -208,14 +208,14 @@ fn try_fold_redundant_jump(
     // was a block parameter, rewrite it to refer to the value that the first jump
     // passed in its parameters. Otherwise, make sure it dominates first_inst.
     //
-    // For example: if we `ebb0: jump ebb1(v1)` to `ebb1(v2): jump ebb2(v2)`,
-    // we want to rewrite the original jump to `jump ebb2(v1)`.
-    let ebb_params: &[Value] = func.dfg.ebb_params(first_dest);
-    debug_assert!(ebb_params.len() == first_params.len());
+    // For example: if we `block0: jump block1(v1)` to `block1(v2): jump block2(v2)`,
+    // we want to rewrite the original jump to `jump block2(v1)`.
+    let block_params: &[Value] = func.dfg.block_params(first_dest);
+    debug_assert!(block_params.len() == first_params.len());

     for value in second_params.iter_mut() {
-        if let Some((n, _)) = ebb_params.iter().enumerate().find(|(_, &p)| p == *value) {
-            // This value was the Nth parameter passed to the second_inst's ebb.
+        if let Some((n, _)) = block_params.iter().enumerate().find(|(_, &p)| p == *value) {
+            // This value was the Nth parameter passed to the second_inst's block.
             // Rewrite it as the Nth parameter passed by first_inst.
             *value = first_params[n];
         }
@@ -233,21 +233,21 @@ fn try_fold_redundant_jump(
     func.dfg[first_inst].put_value_list(value_list); // Put the new list.

     // Bypass the second jump.
-    // This can disconnect the Ebb containing `second_inst`, to be cleaned up later.
+    // This can disconnect the Block containing `second_inst`, to be cleaned up later.
     let second_dest = func.dfg[second_inst].branch_destination().expect("Dest");
     func.change_branch_destination(first_inst, second_dest);
-    cfg.recompute_ebb(func, ebb);
+    cfg.recompute_block(func, block);

-    // The previously-intermediary Ebb may now be unreachable. Update CFG.
+    // The previously-intermediary Block may now be unreachable. Update CFG.
     if cfg.pred_iter(first_dest).count() == 0 {
-        // Remove all instructions from that ebb.
+        // Remove all instructions from that block.
         while let Some(inst) = func.layout.first_inst(first_dest) {
             func.layout.remove_inst(inst);
         }

         // Remove the block...
-        cfg.recompute_ebb(func, first_dest); // ...from predecessor lists.
-        func.layout.remove_ebb(first_dest); // ...from the layout.
+        cfg.recompute_block(func, first_dest); // ...from predecessor lists.
+        func.layout.remove_block(first_dest); // ...from the layout.
     }

     true
@@ -264,14 +264,17 @@ fn fold_redundant_jumps(
     // Postorder iteration guarantees that a chain of jumps is visited from
     // the end of the chain to the start of the chain.
-    for &ebb in domtree.cfg_postorder() {
+    for &block in domtree.cfg_postorder() {
         // Only proceed if the first terminator instruction is a single-target branch.
-        let first_inst = func.layout.last_inst(ebb).expect("Ebb has no terminator");
-        folded |= try_fold_redundant_jump(func, cfg, ebb, first_inst);
+        let first_inst = func
+            .layout
+            .last_inst(block)
+            .expect("Block has no terminator");
+        folded |= try_fold_redundant_jump(func, cfg, block, first_inst);

         // Also try the previous instruction.
         if let Some(prev_inst) = func.layout.prev_inst(first_inst) {
-            folded |= try_fold_redundant_jump(func, cfg, ebb, prev_inst);
+            folded |= try_fold_redundant_jump(func, cfg, block, prev_inst);
         }
     }
@@ -284,8 +287,11 @@ fn fold_redundant_jumps(
 /// Convert `jump` instructions to `fallthrough` instructions where possible and verify that any
 /// existing `fallthrough` instructions are correct.
 fn fallthroughs(func: &mut Function) {
-    for (ebb, succ) in func.layout.ebbs().adjacent_pairs() {
-        let term = func.layout.last_inst(ebb).expect("EBB has no terminator.");
+    for (block, succ) in func.layout.blocks().adjacent_pairs() {
+        let term = func
+            .layout
+            .last_inst(block)
+            .expect("block has no terminator.");
         if let InstructionData::Jump {
             ref mut opcode,
             destination,
@@ -296,10 +302,10 @@ fn fallthroughs(func: &mut Function) {
             Opcode::Fallthrough => {
                 // Somebody used a fall-through instruction before the branch relaxation pass.
                 // Make sure it is correct, i.e. the destination is the layout successor.
-                debug_assert_eq!(destination, succ, "Illegal fall-through in {}", ebb)
+                debug_assert_eq!(destination, succ, "Illegal fall-through in {}", block)
             }
             Opcode::Jump => {
-                // If this is a jump to the successor EBB, change it to a fall-through.
+                // If this is a jump to the successor block, change it to a fall-through.
                 if destination == succ {
                     *opcode = Opcode::Fallthrough;
                     func.encodings[term] = Default::default();
@@ -368,18 +374,18 @@ fn relax_branch(
     // branches, so one way of extending the range of a conditional branch is to invert its
     // condition and make it branch over an unconditional jump which has the larger range.
     //
-    // Splitting the EBB is problematic this late because there may be register diversions in
+    // Splitting the block is problematic this late because there may be register diversions in
     // effect across the conditional branch, and they can't survive the control flow edge to a new
-    // EBB. We have two options for handling that:
+    // block. We have two options for handling that:
     //
-    // 1. Set a flag on the new EBB that indicates it wants the preserve the register diversions of
+    // 1. Set a flag on the new block that indicates it wants the preserve the register diversions of
     //    its layout predecessor, or
-    // 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the EBB.
+    // 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the block.
     //
     // It seems that 1. would allow us to share code among RISC ISAs that need this.
     //
     // We can't allow register diversions to survive from the layout predecessor because the layout
-    // predecessor could contain kill points for some values that are live in this EBB, and
+    // predecessor could contain kill points for some values that are live in this block, and
     // diversions are not automatically cancelled when the live range of a value ends.

     // This assumes solution 2. above:

View File

@@ -19,11 +19,11 @@ pub fn shrink_instructions(func: &mut Function, isa: &dyn TargetIsa) {
     let encinfo = isa.encoding_info();
     let mut divert = RegDiversions::new();

-    for ebb in func.layout.ebbs() {
+    for block in func.layout.blocks() {
         // Load diversions from predecessors.
-        divert.at_ebb(&func.entry_diversions, ebb);
+        divert.at_block(&func.entry_diversions, block);

-        for inst in func.layout.ebb_insts(ebb) {
+        for inst in func.layout.block_insts(block) {
             let enc = func.encodings[inst];
             if enc.is_legal() {
                 // regmove/regfill/regspill are special instructions with register immediates