diff --git a/cranelift/bforest/src/lib.rs b/cranelift/bforest/src/lib.rs
index 26e5b8efcf..bc79ffc7d0 100644
--- a/cranelift/bforest/src/lib.rs
+++ b/cranelift/bforest/src/lib.rs
@@ -148,22 +148,22 @@ mod tests {
     use super::*;
     use crate::entity::EntityRef;
 
-    /// An opaque reference to an extended basic block in a function.
+    /// An opaque reference to a basic block in a function.
     #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
-    pub struct Ebb(u32);
-    entity_impl!(Ebb, "ebb");
+    pub struct Block(u32);
+    entity_impl!(Block, "block");
 
     #[test]
     fn comparator() {
-        let ebb1 = Ebb::new(1);
-        let ebb2 = Ebb::new(2);
-        let ebb3 = Ebb::new(3);
-        let ebb4 = Ebb::new(4);
-        let vals = [ebb1, ebb2, ebb4];
+        let block1 = Block::new(1);
+        let block2 = Block::new(2);
+        let block3 = Block::new(3);
+        let block4 = Block::new(4);
+        let vals = [block1, block2, block4];
         let comp = ();
-        assert_eq!(comp.search(ebb1, &vals), Ok(0));
-        assert_eq!(comp.search(ebb3, &vals), Err(2));
-        assert_eq!(comp.search(ebb4, &vals), Ok(2));
+        assert_eq!(comp.search(block1, &vals), Ok(0));
+        assert_eq!(comp.search(block3, &vals), Err(2));
+        assert_eq!(comp.search(block4, &vals), Ok(2));
     }
 
     #[test]
diff --git a/cranelift/codegen/meta/src/cdsl/ast.rs b/cranelift/codegen/meta/src/cdsl/ast.rs
index e4eb202be3..82cdbad762 100644
--- a/cranelift/codegen/meta/src/cdsl/ast.rs
+++ b/cranelift/codegen/meta/src/cdsl/ast.rs
@@ -708,8 +708,8 @@ macro_rules! def {
 }
 
 // Helper macro to define legalization recipes.
-macro_rules! ebb {
-    // An basic block definition, splitting the current block in 2.
+macro_rules! block {
+    // A basic block definition, splitting the current block in 2.
     ($block: ident) => {
         ExprBuilder::block($block).assign_to(Vec::new())
     };
diff --git a/cranelift/codegen/meta/src/cdsl/instructions.rs b/cranelift/codegen/meta/src/cdsl/instructions.rs
index 10c7f54a7d..f17202eb1c 100644
--- a/cranelift/codegen/meta/src/cdsl/instructions.rs
+++ b/cranelift/codegen/meta/src/cdsl/instructions.rs
@@ -112,7 +112,7 @@ pub(crate) struct InstructionContent {
     /// Indices in operands_out of output operands that are values.
     pub value_results: Vec<usize>,
-    /// True for instructions that terminate the EBB.
+    /// True for instructions that terminate the block.
     pub is_terminator: bool,
     /// True for all branch or jump instructions.
     pub is_branch: bool,
diff --git a/cranelift/codegen/meta/src/gen_inst.rs b/cranelift/codegen/meta/src/gen_inst.rs
index ffadd22d57..af54257fea 100644
--- a/cranelift/codegen/meta/src/gen_inst.rs
+++ b/cranelift/codegen/meta/src/gen_inst.rs
@@ -450,7 +450,7 @@ fn gen_opcodes(all_inst: &AllInstructions, fmt: &mut Formatter) {
         all_inst,
         |inst| inst.is_terminator,
         "is_terminator",
-        "True for instructions that terminate the EBB",
+        "True for instructions that terminate the block",
         fmt,
     );
     gen_bool_accessor(
diff --git a/cranelift/codegen/meta/src/gen_legalizer.rs b/cranelift/codegen/meta/src/gen_legalizer.rs
index ede525e8bc..c872ba32d6 100644
--- a/cranelift/codegen/meta/src/gen_legalizer.rs
+++ b/cranelift/codegen/meta/src/gen_legalizer.rs
@@ -474,7 +474,7 @@ fn gen_transform<'a>(
     // If we are adding some blocks, we need to recall the original block, such that we can
     // recompute it.
     if !transform.block_pool.is_empty() {
-        fmt.line("let orig_ebb = pos.current_ebb().unwrap();");
+        fmt.line("let orig_block = pos.current_block().unwrap();");
     }
 
     // If we're going to delete `inst`, we need to detach its results first so they can be
@@ -486,14 +486,14 @@ fn gen_transform<'a>(
     // Emit new block creation.
     for block in &transform.block_pool {
         let var = transform.var_pool.get(block.name);
-        fmtln!(fmt, "let {} = pos.func.dfg.make_ebb();", var.name);
+        fmtln!(fmt, "let {} = pos.func.dfg.make_block();", var.name);
     }
 
     // Emit the destination pattern.
     for &def_index in &transform.dst {
         if let Some(block) = transform.block_pool.get(def_index) {
             let var = transform.var_pool.get(block.name);
-            fmtln!(fmt, "pos.insert_ebb({});", var.name);
+            fmtln!(fmt, "pos.insert_block({});", var.name);
         }
         emit_dst_inst(
             transform.def_pool.get(def_index),
@@ -507,7 +507,7 @@ fn gen_transform<'a>(
     let def_next_index = transform.def_pool.next_index();
     if let Some(block) = transform.block_pool.get(def_next_index) {
         let var = transform.var_pool.get(block.name);
-        fmtln!(fmt, "pos.insert_ebb({});", var.name);
+        fmtln!(fmt, "pos.insert_block({});", var.name);
     }
 
     // Delete the original instruction if we didn't have an opportunity to replace it.
@@ -520,14 +520,14 @@ fn gen_transform<'a>(
         if transform.def_pool.get(transform.src).apply.inst.is_branch {
             // A branch might have been legalized into multiple branches, so we need to recompute
             // the cfg.
-            fmt.line("cfg.recompute_ebb(pos.func, pos.current_ebb().unwrap());");
+            fmt.line("cfg.recompute_block(pos.func, pos.current_block().unwrap());");
         }
     } else {
         // Update CFG for the new blocks.
-        fmt.line("cfg.recompute_ebb(pos.func, orig_ebb);");
+        fmt.line("cfg.recompute_block(pos.func, orig_block);");
         for block in &transform.block_pool {
             let var = transform.var_pool.get(block.name);
-            fmtln!(fmt, "cfg.recompute_ebb(pos.func, {});", var.name);
+            fmtln!(fmt, "cfg.recompute_block(pos.func, {});", var.name);
         }
     }
diff --git a/cranelift/codegen/meta/src/shared/entities.rs b/cranelift/codegen/meta/src/shared/entities.rs
index 068987c344..c3f2bc0387 100644
--- a/cranelift/codegen/meta/src/shared/entities.rs
+++ b/cranelift/codegen/meta/src/shared/entities.rs
@@ -6,9 +6,9 @@ fn new(format_field_name: &'static str, rust_type: &'static str, doc: &'static s
 }
 
 pub(crate) struct EntityRefs {
-    /// A reference to an extended basic block in the same function.
+    /// A reference to a basic block in the same function.
     /// This is primarily used in control flow instructions.
-    pub(crate) ebb: OperandKind,
+    pub(crate) block: OperandKind,
 
     /// A reference to a stack slot declared in the function preamble.
     pub(crate) stack_slot: OperandKind,
@@ -33,17 +33,17 @@ pub(crate) struct EntityRefs {
     /// A reference to a table declared in the function preamble.
     pub(crate) table: OperandKind,
 
-    /// A variable-sized list of value operands. Use for Ebb and function call arguments.
+    /// A variable-sized list of value operands. Use for Block and function call arguments.
     pub(crate) varargs: OperandKind,
 }
 
 impl EntityRefs {
     pub fn new() -> Self {
         Self {
-            ebb: new(
+            block: new(
                 "destination",
-                "ir::Ebb",
-                "An extended basic block in the same function.",
+                "ir::Block",
+                "A basic block in the same function.",
             ),
 
             stack_slot: new("stack_slot", "ir::StackSlot", "A stack slot"),
@@ -64,7 +64,7 @@ impl EntityRefs {
                 A variable size list of `value` operands.
 
                 Use this to represent arguments passed to a function call, arguments
-                passed to an extended basic block, or a variable number of results
+                passed to a basic block, or a variable number of results
                 returned from an instruction.
                 "#,
             ),
diff --git a/cranelift/codegen/meta/src/shared/formats.rs b/cranelift/codegen/meta/src/shared/formats.rs
index 3cc3d343d7..03c09e2e2b 100644
--- a/cranelift/codegen/meta/src/shared/formats.rs
+++ b/cranelift/codegen/meta/src/shared/formats.rs
@@ -140,25 +140,25 @@ impl Formats {
                 .value()
                 .build(),
 
-            jump: Builder::new("Jump").imm(&entities.ebb).varargs().build(),
+            jump: Builder::new("Jump").imm(&entities.block).varargs().build(),
 
             branch: Builder::new("Branch")
                 .value()
-                .imm(&entities.ebb)
+                .imm(&entities.block)
                 .varargs()
                 .build(),
 
             branch_int: Builder::new("BranchInt")
                 .imm(&imm.intcc)
                 .value()
-                .imm(&entities.ebb)
+                .imm(&entities.block)
                 .varargs()
                 .build(),
 
             branch_float: Builder::new("BranchFloat")
                 .imm(&imm.floatcc)
                 .value()
-                .imm(&entities.ebb)
+                .imm(&entities.block)
                 .varargs()
                 .build(),
 
@@ -166,13 +166,13 @@ impl Formats {
                 .imm(&imm.intcc)
                 .value()
                 .value()
-                .imm(&entities.ebb)
+                .imm(&entities.block)
                 .varargs()
                 .build(),
 
             branch_table: Builder::new("BranchTable")
                 .value()
-                .imm(&entities.ebb)
+                .imm(&entities.block)
                 .imm(&entities.jump_table)
                 .build(),
 
diff --git a/cranelift/codegen/meta/src/shared/instructions.rs b/cranelift/codegen/meta/src/shared/instructions.rs
index 3207dad96e..08b4f66a7a 100644
--- a/cranelift/codegen/meta/src/shared/instructions.rs
+++ b/cranelift/codegen/meta/src/shared/instructions.rs
@@ -18,8 +18,8 @@ fn define_control_flow(
     imm: &Immediates,
     entities: &EntityRefs,
 ) {
-    let EBB = &Operand::new("EBB", &entities.ebb).with_doc("Destination extended basic block");
-    let args = &Operand::new("args", &entities.varargs).with_doc("EBB arguments");
+    let block = &Operand::new("block", &entities.block).with_doc("Destination basic block");
+    let args = &Operand::new("args", &entities.varargs).with_doc("block arguments");
 
     ig.push(
         Inst::new(
@@ -27,13 +27,13 @@ fn define_control_flow(
             "jump",
             r#"
         Jump.
 
-        Unconditionally jump to an extended basic block, passing the specified
-        EBB arguments. The number and types of arguments must match the
-        destination EBB.
+        Unconditionally jump to a basic block, passing the specified
+        block arguments. The number and types of arguments must match the
+        destination block.
         "#,
             &formats.jump,
         )
-        .operands_in(vec![EBB, args])
+        .operands_in(vec![block, args])
         .is_terminator(true)
         .is_branch(true),
     );
 
@@ -42,9 +42,9 @@ fn define_control_flow(
     ig.push(
         Inst::new(
             "fallthrough",
             r#"
-        Fall through to the next EBB.
+        Fall through to the next block.
 
-        This is the same as `jump`, except the destination EBB must be
+        This is the same as `jump`, except the destination block must be
         the next one in the layout.
         Jumps are turned into fall-through instructions by the branch
@@ -53,7 +53,7 @@ fn define_control_flow(
         "#,
             &formats.jump,
         )
-        .operands_in(vec![EBB, args])
+        .operands_in(vec![block, args])
         .is_terminator(true)
         .is_branch(true),
     );
 
@@ -81,7 +81,7 @@ fn define_control_flow(
         "#,
             &formats.branch,
         )
-        .operands_in(vec![c, EBB, args])
+        .operands_in(vec![c, block, args])
         .is_branch(true),
     );
 
@@ -96,7 +96,7 @@ fn define_control_flow(
         "#,
             &formats.branch,
         )
-        .operands_in(vec![c, EBB, args])
+        .operands_in(vec![c, block, args])
         .is_branch(true),
     );
 }
 
@@ -124,14 +124,14 @@ fn define_control_flow(
         and take the branch if the condition is true:
 
         ```text
-        br_icmp ugt v1, v2, ebb4(v5, v6)
+        br_icmp ugt v1, v2, block4(v5, v6)
         ```
 
        is semantically equivalent to:
 
         ```text
         v10 = icmp ugt, v1, v2
-        brnz v10, ebb4(v5, v6)
+        brnz v10, block4(v5, v6)
         ```
 
         Some RISC architectures like MIPS and RISC-V provide instructions that
@@ -140,7 +140,7 @@ fn define_control_flow(
         "#,
             &formats.branch_icmp,
         )
-        .operands_in(vec![Cond, x, y, EBB, args])
+        .operands_in(vec![Cond, x, y, block, args])
         .is_branch(true),
     );
 
@@ -154,7 +154,7 @@ fn define_control_flow(
         "#,
             &formats.branch_int,
         )
-        .operands_in(vec![Cond, f, EBB, args])
+        .operands_in(vec![Cond, f, block, args])
         .is_branch(true),
     );
 }
 
@@ -172,7 +172,7 @@ fn define_control_flow(
         "#,
             &formats.branch_float,
         )
-        .operands_in(vec![Cond, f, EBB, args])
+        .operands_in(vec![Cond, f, block, args])
         .is_branch(true),
     );
 }
 
@@ -188,8 +188,8 @@ fn define_control_flow(
         Indirect branch via jump table.
 
         Use ``x`` as an unsigned index into the jump table ``JT``. If a jump
-        table entry is found, branch to the corresponding EBB. If no entry was
-        found or the index is out-of-bounds, branch to the given default EBB.
+        table entry is found, branch to the corresponding block. If no entry was
+        found or the index is out-of-bounds, branch to the given default block.
 
         Note that this branch instruction can't pass arguments to the targeted
         blocks. Split critical edges as needed to work around this.
@@ -202,7 +202,7 @@ fn define_control_flow(
         "#,
             &formats.branch_table,
         )
-        .operands_in(vec![x, EBB, JT])
+        .operands_in(vec![x, block, JT])
         .is_terminator(true)
         .is_branch(true),
     );
 
@@ -1407,7 +1407,7 @@ pub(crate) fn define(
         satisfy instruction constraints.
 
         The register diversions created by this instruction must be undone
-        before the value leaves the EBB. At the entry to a new EBB, all live
+        before the value leaves the block. At the entry to a new block, all live
         values must be in their originally assigned registers.
         "#,
             &formats.reg_move,
diff --git a/cranelift/codegen/meta/src/shared/legalize.rs b/cranelift/codegen/meta/src/shared/legalize.rs
index 482f5c69c6..1b37f9661b 100644
--- a/cranelift/codegen/meta/src/shared/legalize.rs
+++ b/cranelift/codegen/meta/src/shared/legalize.rs
@@ -197,9 +197,9 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
     let al = var("al");
     let ah = var("ah");
     let cc = var("cc");
-    let ebb = var("ebb");
-    let ebb1 = var("ebb1");
-    let ebb2 = var("ebb2");
+    let block = var("block");
+    let block1 = var("block1");
+    let block2 = var("block2");
     let ptr = var("ptr");
     let flags = var("flags");
     let offset = var("off");
@@ -269,7 +269,7 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
     );
 
     narrow.legalize(
-        def!(brz.I128(x, ebb, vararg)),
+        def!(brz.I128(x, block, vararg)),
         vec![
             def!((xl, xh) = isplit(x)),
             def!(
@@ -287,18 +287,18 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
                 )
             ),
             def!(c = band(a, b)),
-            def!(brnz(c, ebb, vararg)),
+            def!(brnz(c, block, vararg)),
         ],
     );
 
     narrow.legalize(
-        def!(brnz.I128(x, ebb1, vararg)),
+        def!(brnz.I128(x, block1, vararg)),
         vec![
             def!((xl, xh) = isplit(x)),
-            def!(brnz(xl, ebb1, vararg)),
-            def!(jump(ebb2, Literal::empty_vararg())),
-            ebb!(ebb2),
-            def!(brnz(xh, ebb1, vararg)),
+            def!(brnz(xl, block1, vararg)),
+            def!(jump(block2, Literal::empty_vararg())),
+            block!(block2),
+            def!(brnz(xh, block1, vararg)),
         ],
     );
 
@@ -619,13 +619,13 @@ pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGro
     for &ty in &[I8, I16] {
         widen.legalize(
-            def!(brz.ty(x, ebb, vararg)),
-            vec![def!(a = uextend.I32(x)), def!(brz(a, ebb, vararg))],
+            def!(brz.ty(x, block, vararg)),
+            vec![def!(a = uextend.I32(x)), def!(brz(a, block, vararg))],
         );
 
         widen.legalize(
-            def!(brnz.ty(x, ebb, vararg)),
-            vec![def!(a = uextend.I32(x)), def!(brnz(a, ebb, vararg))],
+            def!(brnz.ty(x, block, vararg)),
+            vec![def!(a = uextend.I32(x)), def!(brnz(a, block, vararg))],
         );
     }
 
diff --git a/cranelift/codegen/src/abi.rs b/cranelift/codegen/src/abi.rs
index f3591c1730..3a3ed7a53b 100644
--- a/cranelift/codegen/src/abi.rs
+++ b/cranelift/codegen/src/abi.rs
@@ -135,7 +135,7 @@ pub fn legalize_args(args: &[AbiParam], aa: &mut AA) -> Option<
 ///
 /// The legalizer needs to repair the values at all ABI boundaries:
 ///
-/// - Incoming function arguments to the entry EBB.
+/// - Incoming function arguments to the entry block.
 /// - Function arguments passed to a call.
 /// - Return values from a call.
 /// - Return values passed to a return instruction.
diff --git a/cranelift/codegen/src/binemit/memorysink.rs b/cranelift/codegen/src/binemit/memorysink.rs
index 7e1cdf57a2..60c7fe251e 100644
--- a/cranelift/codegen/src/binemit/memorysink.rs
+++ b/cranelift/codegen/src/binemit/memorysink.rs
@@ -74,8 +74,8 @@ impl<'a> MemoryCodeSink<'a> {
 
 /// A trait for receiving relocations for code that is emitted directly into memory.
 pub trait RelocSink {
-    /// Add a relocation referencing an EBB at the current offset.
-    fn reloc_ebb(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
+    /// Add a relocation referencing a block at the current offset.
+    fn reloc_block(&mut self, _: CodeOffset, _: Reloc, _: CodeOffset);
 
     /// Add a relocation referencing an external symbol at the current offset.
     fn reloc_external(&mut self, _: CodeOffset, _: Reloc, _: &ExternalName, _: Addend);
 
@@ -127,9 +127,9 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
         self.write(x);
     }
 
-    fn reloc_ebb(&mut self, rel: Reloc, ebb_offset: CodeOffset) {
+    fn reloc_block(&mut self, rel: Reloc, block_offset: CodeOffset) {
         let ofs = self.offset();
-        self.relocs.reloc_ebb(ofs, rel, ebb_offset);
+        self.relocs.reloc_block(ofs, rel, block_offset);
     }
 
     fn reloc_external(&mut self, rel: Reloc, name: &ExternalName, addend: Addend) {
@@ -177,7 +177,7 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
 pub struct NullRelocSink {}
 
 impl RelocSink for NullRelocSink {
-    fn reloc_ebb(&mut self, _: u32, _: Reloc, _: u32) {}
+    fn reloc_block(&mut self, _: u32, _: Reloc, _: u32) {}
     fn reloc_external(&mut self, _: u32, _: Reloc, _: &ExternalName, _: i64) {}
     fn reloc_constant(&mut self, _: CodeOffset, _: Reloc, _: ConstantOffset) {}
     fn reloc_jt(&mut self, _: u32, _: Reloc, _: JumpTable) {}
diff --git a/cranelift/codegen/src/binemit/mod.rs b/cranelift/codegen/src/binemit/mod.rs
index 450bcd33d6..7a781e2b56 100644
--- a/cranelift/codegen/src/binemit/mod.rs
+++ b/cranelift/codegen/src/binemit/mod.rs
@@ -127,8 +127,8 @@ pub trait CodeSink {
     /// Add 8 bytes to the code section.
     fn put8(&mut self, _: u64);
 
-    /// Add a relocation referencing an EBB at the current offset.
-    fn reloc_ebb(&mut self, _: Reloc, _: CodeOffset);
+    /// Add a relocation referencing a block at the current offset.
+    fn reloc_block(&mut self, _: Reloc, _: CodeOffset);
 
     /// Add a relocation referencing an external symbol plus the addend at the current offset.
     fn reloc_external(&mut self, _: Reloc, _: &ExternalName, _: Addend);
@@ -205,10 +205,10 @@ where
     EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS, &dyn TargetIsa),
 {
     let mut divert = RegDiversions::new();
-    for ebb in func.layout.ebbs() {
-        divert.at_ebb(&func.entry_diversions, ebb);
-        debug_assert_eq!(func.offsets[ebb], sink.offset());
-        for inst in func.layout.ebb_insts(ebb) {
+    for block in func.layout.blocks() {
+        divert.at_block(&func.entry_diversions, block);
+        debug_assert_eq!(func.offsets[block], sink.offset());
+        for inst in func.layout.block_insts(block) {
             emit_inst(func, inst, &mut divert, sink, isa);
         }
     }
@@ -218,8 +218,8 @@ where
     // Output jump tables.
     for (jt, jt_data) in func.jump_tables.iter() {
         let jt_offset = func.jt_offsets[jt];
-        for ebb in jt_data.iter() {
-            let rel_offset: i32 = func.offsets[*ebb] as i32 - jt_offset as i32;
+        for block in jt_data.iter() {
+            let rel_offset: i32 = func.offsets[*block] as i32 - jt_offset as i32;
             sink.put4(rel_offset as u32)
         }
     }
diff --git a/cranelift/codegen/src/binemit/relaxation.rs b/cranelift/codegen/src/binemit/relaxation.rs
index 3cd8b68de0..abdd778aaf 100644
--- a/cranelift/codegen/src/binemit/relaxation.rs
+++ b/cranelift/codegen/src/binemit/relaxation.rs
@@ -1,9 +1,9 @@
 //! Branch relaxation and offset computation.
 //!
-//! # EBB header offsets
+//! # Block header offsets
 //!
 //! Before we can generate binary machine code for branch instructions, we need to know the final
-//! offsets of all the EBB headers in the function. This information is encoded in the
+//! offsets of all the block headers in the function. This information is encoded in the
 //! `func.offsets` table.
 //!
 //! # Branch relaxation
@@ -16,22 +16,22 @@
 //! unconditional branches:
 //!
 //! ```clif
-//! brz v1, ebb17
+//! brz v1, block17
 //! ```
 //!
 //! can be transformed into:
 //!
 //! ```clif
-//! brnz v1, ebb23
-//! jump ebb17
-//! ebb23:
+//! brnz v1, block23
+//! jump block17
+//! block23:
 //! ```
 
 use crate::binemit::{CodeInfo, CodeOffset};
 use crate::cursor::{Cursor, FuncCursor};
 use crate::dominator_tree::DominatorTree;
 use crate::flowgraph::ControlFlowGraph;
-use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueList};
+use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueList};
 use crate::isa::{EncInfo, TargetIsa};
 use crate::iterators::IteratorExtras;
 use crate::regalloc::RegDiversions;
@@ -40,7 +40,7 @@
 use crate::CodegenResult;
 use core::convert::TryFrom;
 use log::debug;
 
-/// Relax branches and compute the final layout of EBB headers in `func`.
+/// Relax branches and compute the final layout of block headers in `func`.
 ///
 /// Fill in the `func.offsets` table so the function is ready for binary emission.
 pub fn relax_branches(
@@ -53,9 +53,9 @@ pub fn relax_branches(
     let encinfo = isa.encoding_info();
 
-    // Clear all offsets so we can recognize EBBs that haven't been visited yet.
+    // Clear all offsets so we can recognize blocks that haven't been visited yet.
     func.offsets.clear();
-    func.offsets.resize(func.dfg.num_ebbs());
+    func.offsets.resize(func.dfg.num_blocks());
 
     // Start by removing redundant jumps.
     fold_redundant_jumps(func, _cfg, _domtree);
@@ -66,12 +66,12 @@ pub fn relax_branches(
     let mut offset = 0;
     let mut divert = RegDiversions::new();
 
-    // First, compute initial offsets for every EBB.
+    // First, compute initial offsets for every block.
     {
         let mut cur = FuncCursor::new(func);
-        while let Some(ebb) = cur.next_ebb() {
-            divert.at_ebb(&cur.func.entry_diversions, ebb);
-            cur.func.offsets[ebb] = offset;
+        while let Some(block) = cur.next_block() {
+            divert.at_block(&cur.func.entry_diversions, block);
+            cur.func.offsets[block] = offset;
             while let Some(inst) = cur.next_inst() {
                 divert.apply(&cur.func.dfg[inst]);
                 let enc = cur.func.encodings[inst];
@@ -88,12 +88,12 @@ pub fn relax_branches(
         // Visit all instructions in layout order.
         let mut cur = FuncCursor::new(func);
-        while let Some(ebb) = cur.next_ebb() {
-            divert.at_ebb(&cur.func.entry_diversions, ebb);
+        while let Some(block) = cur.next_block() {
+            divert.at_block(&cur.func.entry_diversions, block);
 
-            // Record the offset for `ebb` and make sure we iterate until offsets are stable.
-            if cur.func.offsets[ebb] != offset {
-                cur.func.offsets[ebb] = offset;
+            // Record the offset for `block` and make sure we iterate until offsets are stable.
+            if cur.func.offsets[block] != offset {
+                cur.func.offsets[block] = offset;
                 go_again = true;
             }
 
@@ -153,21 +153,21 @@ pub fn relax_branches(
 fn try_fold_redundant_jump(
     func: &mut Function,
     cfg: &mut ControlFlowGraph,
-    ebb: Ebb,
+    block: Block,
     first_inst: Inst,
 ) -> bool {
     let first_dest = match func.dfg[first_inst].branch_destination() {
-        Some(ebb) => ebb, // The instruction was a single-target branch.
+        Some(block) => block, // The instruction was a single-target branch.
         None => {
             return false; // The instruction was either multi-target or not a branch.
         }
     };
 
-    // For the moment, only attempt to fold a branch to an ebb that is parameterless.
+    // For the moment, only attempt to fold a branch to a block that is parameterless.
     // These blocks are mainly produced by critical edge splitting.
     //
     // TODO: Allow folding blocks that define SSA values and function as phi nodes.
-    if func.dfg.num_ebb_params(first_dest) != 0 {
+    if func.dfg.num_block_params(first_dest) != 0 {
         return false;
     }
 
@@ -178,7 +178,7 @@ fn try_fold_redundant_jump(
         return false;
     }
 
-    // Now we need to fix up first_inst's ebb parameters to match second_inst's,
+    // Now we need to fix up first_inst's block parameters to match second_inst's,
     // without changing the branch-specific arguments.
     //
     // The intermediary block is allowed to reference any SSA value that dominates it,
@@ -208,14 +208,14 @@ fn try_fold_redundant_jump(
     // was a block parameter, rewrite it to refer to the value that the first jump
     // passed in its parameters. Otherwise, make sure it dominates first_inst.
     //
-    // For example: if we `ebb0: jump ebb1(v1)` to `ebb1(v2): jump ebb2(v2)`,
-    // we want to rewrite the original jump to `jump ebb2(v1)`.
-    let ebb_params: &[Value] = func.dfg.ebb_params(first_dest);
-    debug_assert!(ebb_params.len() == first_params.len());
+    // For example: if we `block0: jump block1(v1)` to `block1(v2): jump block2(v2)`,
+    // we want to rewrite the original jump to `jump block2(v1)`.
+    let block_params: &[Value] = func.dfg.block_params(first_dest);
+    debug_assert!(block_params.len() == first_params.len());
 
     for value in second_params.iter_mut() {
-        if let Some((n, _)) = ebb_params.iter().enumerate().find(|(_, &p)| p == *value) {
-            // This value was the Nth parameter passed to the second_inst's ebb.
+        if let Some((n, _)) = block_params.iter().enumerate().find(|(_, &p)| p == *value) {
+            // This value was the Nth parameter passed to the second_inst's block.
             // Rewrite it as the Nth parameter passed by first_inst.
             *value = first_params[n];
         }
@@ -233,21 +233,21 @@ fn try_fold_redundant_jump(
     func.dfg[first_inst].put_value_list(value_list); // Put the new list.
 
     // Bypass the second jump.
-    // This can disconnect the Ebb containing `second_inst`, to be cleaned up later.
+    // This can disconnect the Block containing `second_inst`, to be cleaned up later.
     let second_dest = func.dfg[second_inst].branch_destination().expect("Dest");
     func.change_branch_destination(first_inst, second_dest);
-    cfg.recompute_ebb(func, ebb);
+    cfg.recompute_block(func, block);
 
-    // The previously-intermediary Ebb may now be unreachable. Update CFG.
+    // The previously-intermediary Block may now be unreachable. Update CFG.
     if cfg.pred_iter(first_dest).count() == 0 {
-        // Remove all instructions from that ebb.
+        // Remove all instructions from that block.
         while let Some(inst) = func.layout.first_inst(first_dest) {
             func.layout.remove_inst(inst);
        }
 
         // Remove the block...
-        cfg.recompute_ebb(func, first_dest); // ...from predecessor lists.
-        func.layout.remove_ebb(first_dest); // ...from the layout.
+        cfg.recompute_block(func, first_dest); // ...from predecessor lists.
+        func.layout.remove_block(first_dest); // ...from the layout.
     }
 
     true
@@ -264,14 +264,17 @@ fn fold_redundant_jumps(
     // Postorder iteration guarantees that a chain of jumps is visited from
     // the end of the chain to the start of the chain.
-    for &ebb in domtree.cfg_postorder() {
+    for &block in domtree.cfg_postorder() {
         // Only proceed if the first terminator instruction is a single-target branch.
-        let first_inst = func.layout.last_inst(ebb).expect("Ebb has no terminator");
-        folded |= try_fold_redundant_jump(func, cfg, ebb, first_inst);
+        let first_inst = func
+            .layout
+            .last_inst(block)
+            .expect("Block has no terminator");
+        folded |= try_fold_redundant_jump(func, cfg, block, first_inst);
 
         // Also try the previous instruction.
         if let Some(prev_inst) = func.layout.prev_inst(first_inst) {
-            folded |= try_fold_redundant_jump(func, cfg, ebb, prev_inst);
+            folded |= try_fold_redundant_jump(func, cfg, block, prev_inst);
         }
     }
 
@@ -284,8 +287,11 @@ fn fold_redundant_jumps(
 /// Convert `jump` instructions to `fallthrough` instructions where possible and verify that any
 /// existing `fallthrough` instructions are correct.
 fn fallthroughs(func: &mut Function) {
-    for (ebb, succ) in func.layout.ebbs().adjacent_pairs() {
-        let term = func.layout.last_inst(ebb).expect("EBB has no terminator.");
+    for (block, succ) in func.layout.blocks().adjacent_pairs() {
+        let term = func
+            .layout
+            .last_inst(block)
+            .expect("Block has no terminator.");
         if let InstructionData::Jump {
             ref mut opcode,
             destination,
@@ -296,10 +302,10 @@ fn fallthroughs(func: &mut Function) {
             Opcode::Fallthrough => {
                 // Somebody used a fall-through instruction before the branch relaxation pass.
                 // Make sure it is correct, i.e. the destination is the layout successor.
-                debug_assert_eq!(destination, succ, "Illegal fall-through in {}", ebb)
+                debug_assert_eq!(destination, succ, "Illegal fall-through in {}", block)
             }
             Opcode::Jump => {
-                // If this is a jump to the successor EBB, change it to a fall-through.
+                // If this is a jump to the successor block, change it to a fall-through.
                 if destination == succ {
                     *opcode = Opcode::Fallthrough;
                     func.encodings[term] = Default::default();
@@ -368,18 +374,18 @@ fn relax_branch(
     // branches, so one way of extending the range of a conditional branch is to invert its
     // condition and make it branch over an unconditional jump which has the larger range.
     //
-    // Splitting the EBB is problematic this late because there may be register diversions in
+    // Splitting the block is problematic this late because there may be register diversions in
     // effect across the conditional branch, and they can't survive the control flow edge to a new
-    // EBB. We have two options for handling that:
+    // block. We have two options for handling that:
     //
-    // 1. Set a flag on the new EBB that indicates it wants the preserve the register diversions of
+    // 1. Set a flag on the new block that indicates it wants to preserve the register diversions of
     //    its layout predecessor, or
-    // 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the EBB.
+    // 2. Use an encoding macro for the branch-over-jump pattern so we don't need to split the block.
     //
     // It seems that 1. would allow us to share code among RISC ISAs that need this.
     //
     // We can't allow register diversions to survive from the layout predecessor because the layout
-    // predecessor could contain kill points for some values that are live in this EBB, and
+    // predecessor could contain kill points for some values that are live in this block, and
     // diversions are not automatically cancelled when the live range of a value ends.
 
     // This assumes solution 2. above:
diff --git a/cranelift/codegen/src/binemit/shrink.rs b/cranelift/codegen/src/binemit/shrink.rs
index 084ed2bc3d..f6fa43e062 100644
--- a/cranelift/codegen/src/binemit/shrink.rs
+++ b/cranelift/codegen/src/binemit/shrink.rs
@@ -19,11 +19,11 @@ pub fn shrink_instructions(func: &mut Function, isa: &dyn TargetIsa) {
     let encinfo = isa.encoding_info();
     let mut divert = RegDiversions::new();
 
-    for ebb in func.layout.ebbs() {
+    for block in func.layout.blocks() {
         // Load diversions from predecessors.
-        divert.at_ebb(&func.entry_diversions, ebb);
+        divert.at_block(&func.entry_diversions, block);
 
-        for inst in func.layout.ebb_insts(ebb) {
+        for inst in func.layout.block_insts(block) {
             let enc = func.encodings[inst];
             if enc.is_legal() {
                 // regmove/regfill/regspill are special instructions with register immediates
diff --git a/cranelift/codegen/src/cfg_printer.rs b/cranelift/codegen/src/cfg_printer.rs
index ed47475295..364b2985fe 100644
--- a/cranelift/codegen/src/cfg_printer.rs
+++ b/cranelift/codegen/src/cfg_printer.rs
@@ -4,7 +4,7 @@ use alloc::vec::Vec;
 use core::fmt::{Display, Formatter, Result, Write};
 
 use crate::entity::SecondaryMap;
-use crate::flowgraph::{BasicBlock, ControlFlowGraph};
+use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
 use crate::ir::Function;
 use crate::write::{FuncWriter, PlainWriter};
 
@@ -27,7 +27,7 @@ impl<'a> CFGPrinter<'a> {
     /// Write the CFG for this function to `w`.
     pub fn write(&self, w: &mut dyn Write) -> Result {
         self.header(w)?;
-        self.ebb_nodes(w)?;
+        self.block_nodes(w)?;
         self.cfg_connections(w)?;
         writeln!(w, "}}")
     }
@@ -40,7 +40,7 @@ impl<'a> CFGPrinter<'a> {
         Ok(())
     }
 
-    fn ebb_nodes(&self, w: &mut dyn Write) -> Result {
+    fn block_nodes(&self, w: &mut dyn Write) -> Result {
         let mut aliases = SecondaryMap::<_, Vec<_>>::new();
         for v in self.func.dfg.values() {
             // VADFS returns the immediate target of an alias
         }
 
-        for ebb in &self.func.layout {
-            write!(w, "    {} [shape=record, label=\"{{", ebb)?;
-            crate::write::write_ebb_header(w, self.func, None, ebb, 4)?;
+        for block in &self.func.layout {
+            write!(w, "    {} [shape=record, label=\"{{", block)?;
+            crate::write::write_block_header(w, self.func, None, block, 4)?;
             // Add all outgoing branch instructions to the label.
-            for inst in self.func.layout.ebb_insts(ebb) {
+            for inst in self.func.layout.block_insts(block) {
                 write!(w, " | <{}>", inst)?;
                 PlainWriter.write_instruction(w, self.func, &aliases, None, inst, 0)?;
             }
@@ -63,9 +63,13 @@ impl<'a> CFGPrinter<'a> {
     fn cfg_connections(&self, w: &mut dyn Write) -> Result {
-        for ebb in &self.func.layout {
-            for BasicBlock { ebb: parent, inst } in self.cfg.pred_iter(ebb) {
-                writeln!(w, "    {}:{} -> {}", parent, inst, ebb)?;
+        for block in &self.func.layout {
+            for BlockPredecessor {
+                block: parent,
+                inst,
+            } in self.cfg.pred_iter(block)
+            {
+                writeln!(w, "    {}:{} -> {}", parent, inst, block)?;
             }
         }
         Ok(())
diff --git a/cranelift/codegen/src/cursor.rs b/cranelift/codegen/src/cursor.rs
index e2ab8e5e5f..51345cde47 100644
--- a/cranelift/codegen/src/cursor.rs
+++ b/cranelift/codegen/src/cursor.rs
@@ -13,12 +13,12 @@ pub enum CursorPosition {
     /// Cursor is pointing at an existing instruction.
     /// New instructions will be inserted *before* the current instruction.
     At(ir::Inst),
-    /// Cursor is before the beginning of an EBB. No instructions can be inserted. Calling
-    /// `next_inst()` will move to the first instruction in the EBB.
-    Before(ir::Ebb),
-    /// Cursor is pointing after the end of an EBB.
-    /// New instructions will be appended to the EBB.
-    After(ir::Ebb),
+    /// Cursor is before the beginning of a block. No instructions can be inserted. Calling
+    /// `next_inst()` will move to the first instruction in the block.
+    Before(ir::Block),
+    /// Cursor is pointing after the end of a block.
+    /// New instructions will be appended to the block.
+    After(ir::Block),
 }
 
 /// All cursor types implement the `Cursor` which provides common navigation operations.
@@ -46,7 +46,7 @@ pub trait Cursor {
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, SourceLoc};
+    /// # use cranelift_codegen::ir::{Function, Block, SourceLoc};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function, srcloc: SourceLoc) {
     ///     let mut pos = FuncCursor::new(func).with_srcloc(srcloc);
@@ -76,7 +76,7 @@ pub trait Cursor {
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function, inst: Inst) {
     ///     let mut pos = FuncCursor::new(func).at_inst(inst);
@@ -92,68 +92,68 @@ pub trait Cursor {
         self
     }
 
-    /// Rebuild this cursor positioned at the first insertion point for `ebb`.
+    /// Rebuild this cursor positioned at the first insertion point for `block`.
     /// This differs from `at_first_inst` in that it doesn't assume that any
-    /// instructions have been inserted into `ebb` yet.
+    /// instructions have been inserted into `block` yet.
     ///
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_func(func: &mut Function, ebb: Ebb) {
-    ///     let mut pos = FuncCursor::new(func).at_first_insertion_point(ebb);
+    /// fn edit_func(func: &mut Function, block: Block) {
+    ///     let mut pos = FuncCursor::new(func).at_first_insertion_point(block);
     ///
     ///     // Use `pos`...
     /// }
     /// ```
-    fn at_first_insertion_point(mut self, ebb: ir::Ebb) -> Self
+    fn at_first_insertion_point(mut self, block: ir::Block) -> Self
     where
         Self: Sized,
     {
-        self.goto_first_insertion_point(ebb);
+        self.goto_first_insertion_point(block);
         self
     }
 
-    /// Rebuild this cursor positioned at the first instruction in `ebb`.
+    /// Rebuild this cursor positioned at the first instruction in `block`.
     ///
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_func(func: &mut Function, ebb: Ebb) {
-    ///     let mut pos = FuncCursor::new(func).at_first_inst(ebb);
+    /// fn edit_func(func: &mut Function, block: Block) {
+    ///     let mut pos = FuncCursor::new(func).at_first_inst(block);
     ///
     ///     // Use `pos`...
     /// }
     /// ```
-    fn at_first_inst(mut self, ebb: ir::Ebb) -> Self
+    fn at_first_inst(mut self, block: ir::Block) -> Self
     where
         Self: Sized,
     {
-        self.goto_first_inst(ebb);
+        self.goto_first_inst(block);
         self
     }
 
-    /// Rebuild this cursor positioned at the last instruction in `ebb`.
+    /// Rebuild this cursor positioned at the last instruction in `block`.
     ///
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_func(func: &mut Function, ebb: Ebb) {
-    ///     let mut pos = FuncCursor::new(func).at_last_inst(ebb);
+    /// fn edit_func(func: &mut Function, block: Block) {
+    ///     let mut pos = FuncCursor::new(func).at_last_inst(block);
     ///
     ///     // Use `pos`...
     /// }
     /// ```
-    fn at_last_inst(mut self, ebb: ir::Ebb) -> Self
+    fn at_last_inst(mut self, block: ir::Block) -> Self
     where
         Self: Sized,
     {
-        self.goto_last_inst(ebb);
+        self.goto_last_inst(block);
         self
     }
 
@@ -162,7 +162,7 @@ pub trait Cursor {
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function, inst: Inst) {
     ///     let mut pos = FuncCursor::new(func).after_inst(inst);
@@ -178,55 +178,55 @@ pub trait Cursor {
         self
     }
 
-    /// Rebuild this cursor positioned at the top of `ebb`.
+    /// Rebuild this cursor positioned at the top of `block`.
     ///
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_func(func: &mut Function, ebb: Ebb) {
-    ///     let mut pos = FuncCursor::new(func).at_top(ebb);
+    /// fn edit_func(func: &mut Function, block: Block) {
+    ///     let mut pos = FuncCursor::new(func).at_top(block);
     ///
     ///     // Use `pos`...
     /// }
     /// ```
-    fn at_top(mut self, ebb: ir::Ebb) -> Self
+    fn at_top(mut self, block: ir::Block) -> Self
     where
         Self: Sized,
     {
-        self.goto_top(ebb);
+        self.goto_top(block);
         self
     }
 
-    /// Rebuild this cursor positioned at the bottom of `ebb`.
+    /// Rebuild this cursor positioned at the bottom of `block`.
     ///
     /// This is intended to be used as a builder method:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb, Inst};
+    /// # use cranelift_codegen::ir::{Function, Block, Inst};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_func(func: &mut Function, ebb: Ebb) {
-    ///     let mut pos = FuncCursor::new(func).at_bottom(ebb);
+    /// fn edit_func(func: &mut Function, block: Block) {
+    ///     let mut pos = FuncCursor::new(func).at_bottom(block);
     ///
     ///     // Use `pos`...
     /// }
     /// ```
-    fn at_bottom(mut self, ebb: ir::Ebb) -> Self
+    fn at_bottom(mut self, block: ir::Block) -> Self
     where
         Self: Sized,
     {
-        self.goto_bottom(ebb);
+        self.goto_bottom(block);
         self
     }
 
-    /// Get the EBB corresponding to the current position.
-    fn current_ebb(&self) -> Option<ir::Ebb> {
+    /// Get the block corresponding to the current position.
+    fn current_block(&self) -> Option<ir::Block> {
         use self::CursorPosition::*;
         match self.position() {
             Nowhere => None,
-            At(inst) => self.layout().inst_ebb(inst),
-            Before(ebb) | After(ebb) => Some(ebb),
+            At(inst) => self.layout().inst_block(inst),
+            Before(block) | After(block) => Some(block),
         }
     }
 
@@ -242,13 +242,13 @@ pub trait Cursor {
     /// Go to the position after a specific instruction, which must be inserted
     /// in the layout. New instructions will be inserted after `inst`.
     fn goto_after_inst(&mut self, inst: ir::Inst) {
-        debug_assert!(self.layout().inst_ebb(inst).is_some());
+        debug_assert!(self.layout().inst_block(inst).is_some());
         let new_pos = if let Some(next) = self.layout().next_inst(inst) {
             CursorPosition::At(next)
         } else {
             CursorPosition::After(
                 self.layout()
-                    .inst_ebb(inst)
+                    .inst_block(inst)
                     .expect("current instruction removed?"),
             )
         };
@@ -258,133 +258,133 @@ pub trait Cursor {
     /// Go to a specific instruction which must be inserted in the layout.
     /// New instructions will be inserted before `inst`.
     fn goto_inst(&mut self, inst: ir::Inst) {
-        debug_assert!(self.layout().inst_ebb(inst).is_some());
+        debug_assert!(self.layout().inst_block(inst).is_some());
         self.set_position(CursorPosition::At(inst));
     }
 
-    /// Go to the position for inserting instructions at the beginning of `ebb`,
+    /// Go to the position for inserting instructions at the beginning of `block`,
     /// which unlike `goto_first_inst` doesn't assume that any instructions have
-    /// been inserted into `ebb` yet.
-    fn goto_first_insertion_point(&mut self, ebb: ir::Ebb) {
-        if let Some(inst) = self.layout().first_inst(ebb) {
+    /// been inserted into `block` yet.
+    fn goto_first_insertion_point(&mut self, block: ir::Block) {
+        if let Some(inst) = self.layout().first_inst(block) {
             self.goto_inst(inst);
         } else {
-            self.goto_bottom(ebb);
+            self.goto_bottom(block);
         }
     }
 
-    /// Go to the first instruction in `ebb`.
-    fn goto_first_inst(&mut self, ebb: ir::Ebb) {
-        let inst = self.layout().first_inst(ebb).expect("Empty EBB");
+    /// Go to the first instruction in `block`.
+    fn goto_first_inst(&mut self, block: ir::Block) {
+        let inst = self.layout().first_inst(block).expect("Empty block");
         self.goto_inst(inst);
     }
 
-    /// Go to the last instruction in `ebb`.
-    fn goto_last_inst(&mut self, ebb: ir::Ebb) {
-        let inst = self.layout().last_inst(ebb).expect("Empty EBB");
+    /// Go to the last instruction in `block`.
+    fn goto_last_inst(&mut self, block: ir::Block) {
+        let inst = self.layout().last_inst(block).expect("Empty block");
         self.goto_inst(inst);
     }
 
-    /// Go to the top of `ebb` which must be inserted into the layout.
+    /// Go to the top of `block` which must be inserted into the layout.
     /// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
-    /// instruction in `ebb`.
-    fn goto_top(&mut self, ebb: ir::Ebb) {
-        debug_assert!(self.layout().is_ebb_inserted(ebb));
-        self.set_position(CursorPosition::Before(ebb));
+    /// instruction in `block`.
+    fn goto_top(&mut self, block: ir::Block) {
+        debug_assert!(self.layout().is_block_inserted(block));
+        self.set_position(CursorPosition::Before(block));
     }
 
-    /// Go to the bottom of `ebb` which must be inserted into the layout.
-    /// At this position, inserted instructions will be appended to `ebb`.
-    fn goto_bottom(&mut self, ebb: ir::Ebb) {
-        debug_assert!(self.layout().is_ebb_inserted(ebb));
-        self.set_position(CursorPosition::After(ebb));
+    /// Go to the bottom of `block` which must be inserted into the layout.
+    /// At this position, inserted instructions will be appended to `block`.
+    fn goto_bottom(&mut self, block: ir::Block) {
+        debug_assert!(self.layout().is_block_inserted(block));
+        self.set_position(CursorPosition::After(block));
    }
 
-    /// Go to the top of the next EBB in layout order and return it.
+    /// Go to the top of the next block in layout order and return it.
     ///
-    /// - If the cursor wasn't pointing at anything, go to the top of the first EBB in the
+    /// - If the cursor wasn't pointing at anything, go to the top of the first block in the
     ///   function.
-    /// - If there are no more EBBs, leave the cursor pointing at nothing and return `None`.
+    /// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
     ///
     /// # Examples
     ///
-    /// The `next_ebb()` method is intended for iterating over the EBBs in layout order:
+    /// The `next_block()` method is intended for iterating over the blocks in layout order:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb};
+    /// # use cranelift_codegen::ir::{Function, Block};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function) {
     ///     let mut cursor = FuncCursor::new(func);
-    ///     while let Some(ebb) = cursor.next_ebb() {
-    ///         // Edit ebb.
+    ///     while let Some(block) = cursor.next_block() {
+    ///         // Edit block.
     ///     }
     /// }
     /// ```
-    fn next_ebb(&mut self) -> Option<ir::Ebb> {
-        let next = if let Some(ebb) = self.current_ebb() {
-            self.layout().next_ebb(ebb)
+    fn next_block(&mut self) -> Option<ir::Block> {
+        let next = if let Some(block) = self.current_block() {
+            self.layout().next_block(block)
         } else {
             self.layout().entry_block()
         };
         self.set_position(match next {
-            Some(ebb) => CursorPosition::Before(ebb),
+            Some(block) => CursorPosition::Before(block),
             None => CursorPosition::Nowhere,
         });
         next
     }
 
-    /// Go to the bottom of the previous EBB in layout order and return it.
+    /// Go to the bottom of the previous block in layout order and return it.
     ///
-    /// - If the cursor wasn't pointing at anything, go to the bottom of the last EBB in the
+    /// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
     ///   function.
-    /// - If there are no more EBBs, leave the cursor pointing at nothing and return `None`.
+    /// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
     ///
     /// # Examples
     ///
-    /// The `prev_ebb()` method is intended for iterating over the EBBs in backwards layout order:
+    /// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb};
+    /// # use cranelift_codegen::ir::{Function, Block};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function) {
     ///     let mut cursor = FuncCursor::new(func);
-    ///     while let Some(ebb) = cursor.prev_ebb() {
-    ///         // Edit ebb.
+    ///     while let Some(block) = cursor.prev_block() {
+    ///         // Edit block.
     ///     }
     /// }
     /// ```
-    fn prev_ebb(&mut self) -> Option<ir::Ebb> {
-        let prev = if let Some(ebb) = self.current_ebb() {
-            self.layout().prev_ebb(ebb)
+    fn prev_block(&mut self) -> Option<ir::Block> {
+        let prev = if let Some(block) = self.current_block() {
+            self.layout().prev_block(block)
         } else {
-            self.layout().last_ebb()
+            self.layout().last_block()
        };
         self.set_position(match prev {
-            Some(ebb) => CursorPosition::After(ebb),
+            Some(block) => CursorPosition::After(block),
             None => CursorPosition::Nowhere,
         });
         prev
     }
 
-    /// Move to the next instruction in the same EBB and return it.
+    /// Move to the next instruction in the same block and return it.
     ///
-    /// - If the cursor was positioned before an EBB, go to the first instruction in that EBB.
-    /// - If there are no more instructions in the EBB, go to the `After(ebb)` position and return
+    /// - If the cursor was positioned before a block, go to the first instruction in that block.
+    /// - If there are no more instructions in the block, go to the `After(block)` position and return
     ///   `None`.
     /// - If the cursor wasn't pointing anywhere, keep doing that.
     ///
-    /// This method will never move the cursor to a different EBB.
+    /// This method will never move the cursor to a different block.
     ///
     /// # Examples
     ///
-    /// The `next_inst()` method is intended for iterating over the instructions in an EBB like
+    /// The `next_inst()` method is intended for iterating over the instructions in a block like
     /// this:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb};
+    /// # use cranelift_codegen::ir::{Function, Block};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_ebb(func: &mut Function, ebb: Ebb) {
-    ///     let mut cursor = FuncCursor::new(func).at_top(ebb);
+    /// fn edit_block(func: &mut Function, block: Block) {
+    ///     let mut cursor = FuncCursor::new(func).at_top(block);
     ///     while let Some(inst) = cursor.next_inst() {
     ///         // Edit instructions...
     ///     }
@@ -395,11 +395,11 @@ pub trait Cursor {
     /// Iterating over all the instructions in a function looks like this:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb};
+    /// # use cranelift_codegen::ir::{Function, Block};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
     /// fn edit_func(func: &mut Function) {
     ///     let mut cursor = FuncCursor::new(func);
-    ///     while let Some(ebb) = cursor.next_ebb() {
+    ///     while let Some(block) = cursor.next_block() {
     ///         while let Some(inst) = cursor.next_inst() {
     ///             // Edit instructions...
     ///         }
@@ -417,44 +417,44 @@ pub trait Cursor {
                 } else {
                     let pos = After(
                         self.layout()
-                            .inst_ebb(inst)
+                            .inst_block(inst)
                             .expect("current instruction removed?"),
                     );
                     self.set_position(pos);
                     None
                 }
             }
-            Before(ebb) => {
-                if let Some(next) = self.layout().first_inst(ebb) {
+            Before(block) => {
+                if let Some(next) = self.layout().first_inst(block) {
                     self.set_position(At(next));
                     Some(next)
                 } else {
-                    self.set_position(After(ebb));
+                    self.set_position(After(block));
                     None
                 }
             }
         }
     }
 
-    /// Move to the previous instruction in the same EBB and return it.
+    /// Move to the previous instruction in the same block and return it.
     ///
-    /// - If the cursor was positioned after an EBB, go to the last instruction in that EBB.
-    /// - If there are no more instructions in the EBB, go to the `Before(ebb)` position and return
+    /// - If the cursor was positioned after a block, go to the last instruction in that block.
+    /// - If there are no more instructions in the block, go to the `Before(block)` position and return
     ///   `None`.
     /// - If the cursor wasn't pointing anywhere, keep doing that.
     ///
-    /// This method will never move the cursor to a different EBB.
+    /// This method will never move the cursor to a different block.
     ///
     /// # Examples
     ///
-    /// The `prev_inst()` method is intended for iterating backwards over the instructions in an
-    /// EBB like this:
+    /// The `prev_inst()` method is intended for iterating backwards over the instructions in a
+    /// block like this:
     ///
     /// ```
-    /// # use cranelift_codegen::ir::{Function, Ebb};
+    /// # use cranelift_codegen::ir::{Function, Block};
     /// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
-    /// fn edit_ebb(func: &mut Function, ebb: Ebb) {
-    ///     let mut cursor = FuncCursor::new(func).at_bottom(ebb);
+    /// fn edit_block(func: &mut Function, block: Block) {
+    ///     let mut cursor = FuncCursor::new(func).at_bottom(block);
     ///     while let Some(inst) = cursor.prev_inst() {
     ///         // Edit instructions...
     ///     }
     /// }
     /// ```
@@ -471,19 +471,19 @@ pub trait Cursor {
                 } else {
                     let pos = Before(
                         self.layout()
-                            .inst_ebb(inst)
+                            .inst_block(inst)
                             .expect("current instruction removed?"),
                     );
                     self.set_position(pos);
                     None
                 }
             }
-            After(ebb) => {
-                if let Some(prev) = self.layout().last_inst(ebb) {
+            After(block) => {
+                if let Some(prev) = self.layout().last_inst(block) {
                     self.set_position(At(prev));
                     Some(prev)
                 } else {
-                    self.set_position(Before(ebb));
+                    self.set_position(Before(block));
                     None
                 }
             }
@@ -494,17 +494,17 @@ pub trait Cursor {
     ///
     /// - If pointing at an instruction, the new instruction is inserted before the current
     ///   instruction.
-    /// - If pointing at the bottom of an EBB, the new instruction is appended to the EBB.
+    /// - If pointing at the bottom of a block, the new instruction is appended to the block.
     /// - Otherwise panic.
     ///
     /// In either case, the cursor is not moved, such that repeated calls to `insert_inst()` causes
-    /// instructions to appear in insertion order in the EBB.
+    /// instructions to appear in insertion order in the block.
     fn insert_inst(&mut self, inst: ir::Inst) {
         use self::CursorPosition::*;
         match self.position() {
             Nowhere | Before(..) => panic!("Invalid insert_inst position"),
             At(cur) => self.layout_mut().insert_inst(inst, cur),
-            After(ebb) => self.layout_mut().append_inst(inst, ebb),
+            After(block) => self.layout_mut().append_inst(inst, block),
         }
     }
 
@@ -532,34 +532,34 @@ pub trait Cursor {
         inst
     }
 
-    /// Insert an EBB at the current position and switch to it.
+    /// Insert a block at the current position and switch to it.
     ///
-    /// As far as possible, this method behaves as if the EBB header were an instruction inserted
+    /// As far as possible, this method behaves as if the block header were an instruction inserted
     /// at the current position.
     ///
-    /// - If the cursor is pointing at an existing instruction, *the current EBB is split in two*
-    ///   and the current instruction becomes the first instruction in the inserted EBB.
-    /// - If the cursor points at the bottom of an EBB, the new EBB is inserted after the current
-    ///   one, and moved to the bottom of the new EBB where instructions can be appended.
-    /// - If the cursor points to the top of an EBB, the new EBB is inserted above the current one.
-    /// - If the cursor is not pointing at anything, the new EBB is placed last in the layout.
+    /// - If the cursor is pointing at an existing instruction, *the current block is split in two*
+    ///   and the current instruction becomes the first instruction in the inserted block.
+    /// - If the cursor points at the bottom of a block, the new block is inserted after the current
+    ///   one, and moved to the bottom of the new block where instructions can be appended.
+    /// - If the cursor points to the top of a block, the new block is inserted above the current one.
+    /// - If the cursor is not pointing at anything, the new block is placed last in the layout.
     ///
     /// This means that it is always valid to call this method, and it always leaves the cursor in
-    /// a state that will insert instructions into the new EBB.
-    fn insert_ebb(&mut self, new_ebb: ir::Ebb) {
+    /// a state that will insert instructions into the new block.
+    fn insert_block(&mut self, new_block: ir::Block) {
         use self::CursorPosition::*;
         match self.position() {
             At(inst) => {
-                self.layout_mut().split_ebb(new_ebb, inst);
-                // All other cases move to `After(ebb)`, but in this case we'll stay `At(inst)`.
+                self.layout_mut().split_block(new_block, inst);
+                // All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
                 return;
             }
-            Nowhere => self.layout_mut().append_ebb(new_ebb),
-            Before(ebb) => self.layout_mut().insert_ebb(new_ebb, ebb),
-            After(ebb) => self.layout_mut().insert_ebb_after(new_ebb, ebb),
+            Nowhere => self.layout_mut().append_block(new_block),
+            Before(block) => self.layout_mut().insert_block(new_block, block),
+            After(block) => self.layout_mut().insert_block_after(new_block, block),
         }
-        // For everything but `At(inst)` we end up appending to the new EBB.
-        self.set_position(After(new_ebb));
+        // For everything but `At(inst)` we end up appending to the new block.
+        self.set_position(After(new_block));
     }
 }
diff --git a/cranelift/codegen/src/dce.rs b/cranelift/codegen/src/dce.rs
index 0aa9adc15b..b217534c3e 100644
--- a/cranelift/codegen/src/dce.rs
+++ b/cranelift/codegen/src/dce.rs
@@ -46,8 +46,8 @@ pub fn do_dce(func: &mut Function, domtree: &mut DominatorTree) {
     debug_assert!(domtree.is_valid());
 
     let mut live = vec![false; func.dfg.num_values()];
-    for &ebb in domtree.cfg_postorder() {
-        let mut pos = FuncCursor::new(func).at_bottom(ebb);
+    for &block in domtree.cfg_postorder() {
+        let mut pos = FuncCursor::new(func).at_bottom(block);
         while let Some(inst) = pos.prev_inst() {
             {
                 let data = &pos.func.dfg[inst];
diff --git a/cranelift/codegen/src/dominator_tree.rs b/cranelift/codegen/src/dominator_tree.rs
index 5251121cdc..d397fc7183 100644
--- a/cranelift/codegen/src/dominator_tree.rs
+++ b/cranelift/codegen/src/dominator_tree.rs
@@ -1,9 +1,9 @@
-//! A Dominator Tree represented as mappings of Ebbs to their immediate dominator.
+//! A Dominator Tree represented as mappings of Blocks to their immediate dominator.
 
 use crate::entity::SecondaryMap;
-use crate::flowgraph::{BasicBlock, ControlFlowGraph};
+use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
 use crate::ir::instructions::BranchInfo;
-use crate::ir::{Ebb, ExpandedProgramPoint, Function, Inst, Layout, ProgramOrder, Value};
+use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Layout, ProgramOrder, Value};
 use crate::packed_option::PackedOption;
 use crate::timing;
 use alloc::vec::Vec;
@@ -19,7 +19,7 @@ const STRIDE: u32 = 4;
 const DONE: u32 = 1;
 const SEEN: u32 = 2;
 
-/// Dominator tree node. We keep one of these per EBB.
+/// Dominator tree node. We keep one of these per block.
 #[derive(Clone, Default)]
 struct DomNode {
     /// Number of this node in a reverse post-order traversal of the CFG, starting from 1.
@@ -28,7 +28,7 @@ struct DomNode {
     /// Unreachable nodes get number 0, all others are positive.
     rpo_number: u32,
 
-    /// The immediate dominator of this EBB, represented as the branch or jump instruction at the
+    /// The immediate dominator of this block, represented as the branch or jump instruction at the
     /// end of the dominating basic block.
     ///
     /// This is `None` for unreachable blocks and the entry block which doesn't have an immediate
     /// dominator.
@@ -38,53 +38,53 @@
 /// The dominator tree for a single function.
 pub struct DominatorTree {
-    nodes: SecondaryMap<Ebb, DomNode>,
+    nodes: SecondaryMap<Block, DomNode>,
 
-    /// CFG post-order of all reachable EBBs.
-    postorder: Vec<Ebb>,
+    /// CFG post-order of all reachable blocks.
+    postorder: Vec<Block>,
 
     /// Scratch memory used by `compute_postorder()`.
-    stack: Vec<Ebb>,
+    stack: Vec<Block>,
 
     valid: bool,
 }
 
 /// Methods for querying the dominator tree.
 impl DominatorTree {
-    /// Is `ebb` reachable from the entry block?
-    pub fn is_reachable(&self, ebb: Ebb) -> bool {
-        self.nodes[ebb].rpo_number != 0
+    /// Is `block` reachable from the entry block?
+    pub fn is_reachable(&self, block: Block) -> bool {
+        self.nodes[block].rpo_number != 0
     }
 
-    /// Get the CFG post-order of EBBs that was used to compute the dominator tree.
+    /// Get the CFG post-order of blocks that was used to compute the dominator tree.
     ///
     /// Note that this post-order is not updated automatically when the CFG is modified. It is
     /// computed from scratch and cached by `compute()`.
-    pub fn cfg_postorder(&self) -> &[Ebb] {
+    pub fn cfg_postorder(&self) -> &[Block] {
         debug_assert!(self.is_valid());
         &self.postorder
     }
 
-    /// Returns the immediate dominator of `ebb`.
+    /// Returns the immediate dominator of `block`.
     ///
-    /// The immediate dominator of an extended basic block is a basic block which we represent by
+    /// The immediate dominator of a basic block is a basic block which we represent by
     /// the branch or jump instruction at the end of the basic block. This does not have to be the
-    /// terminator of its EBB.
+    /// terminator of its block.
     ///
-    /// A branch or jump is said to *dominate* `ebb` if all control flow paths from the function
-    /// entry to `ebb` must go through the branch.
+    /// A branch or jump is said to *dominate* `block` if all control flow paths from the function
+    /// entry to `block` must go through the branch.
     ///
-    /// The *immediate dominator* is the dominator that is closest to `ebb`. All other dominators
+    /// The *immediate dominator* is the dominator that is closest to `block`. All other dominators
     /// also dominate the immediate dominator.
     ///
-    /// This returns `None` if `ebb` is not reachable from the entry EBB, or if it is the entry EBB
+    /// This returns `None` if `block` is not reachable from the entry block, or if it is the entry block
     /// which has no dominators.
-    pub fn idom(&self, ebb: Ebb) -> Option<Inst> {
-        self.nodes[ebb].idom.into()
+    pub fn idom(&self, block: Block) -> Option<Inst> {
+        self.nodes[block].idom.into()
     }
 
-    /// Compare two EBBs relative to the reverse post-order.
-    fn rpo_cmp_ebb(&self, a: Ebb, b: Ebb) -> Ordering {
+    /// Compare two blocks relative to the reverse post-order.
+    fn rpo_cmp_block(&self, a: Block, b: Block) -> Ordering {
         self.nodes[a].rpo_number.cmp(&self.nodes[b].rpo_number)
     }
 
@@ -93,7 +93,7 @@ impl DominatorTree {
     ///
     /// Return `Ordering::Less` if `a` comes before `b` in the RPO.
     ///
-    /// If `a` and `b` belong to the same EBB, compare their relative position in the EBB.
+    /// If `a` and `b` belong to the same block, compare their relative position in the block.
     pub fn rpo_cmp<A, B>(&self, a: A, b: B, layout: &Layout) -> Ordering
     where
         A: Into<ExpandedProgramPoint>,
         B: Into<ExpandedProgramPoint>,
     {
         let a = a.into();
         let b = b.into();
-        self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
+        self.rpo_cmp_block(layout.pp_block(a), layout.pp_block(b))
             .then(layout.cmp(a, b))
     }
 
@@ -110,7 +110,7 @@ impl DominatorTree {
     /// This means that every control-flow path from the function entry to `b` must go through `a`.
     ///
     /// Dominance is ill defined for unreachable blocks. This function can always determine
-    /// dominance for instructions in the same EBB, but otherwise returns `false` if either block
+    /// dominance for instructions in the same block, but otherwise returns `false` if either block
     /// is unreachable.
     ///
     /// An instruction is considered to dominate itself.
@@ -122,12 +122,14 @@ impl DominatorTree {
         let a = a.into();
         let b = b.into();
         match a {
-            ExpandedProgramPoint::Ebb(ebb_a) => {
-                a == b || self.last_dominator(ebb_a, b, layout).is_some()
+            ExpandedProgramPoint::Block(block_a) => {
+                a == b || self.last_dominator(block_a, b, layout).is_some()
             }
             ExpandedProgramPoint::Inst(inst_a) => {
-                let ebb_a = layout.inst_ebb(inst_a).expect("Instruction not in layout.");
-                match self.last_dominator(ebb_a, b, layout) {
+                let block_a = layout
+                    .inst_block(inst_a)
+                    .expect("Instruction not in layout.");
+                match self.last_dominator(block_a, b, layout) {
                     Some(last) => layout.cmp(inst_a, last) != Ordering::Greater,
                     None => false,
                 }
@@ -137,14 +139,14 @@ impl DominatorTree {

     /// Find the last instruction in `a` that dominates `b`.
     /// If no instructions in `a` dominate `b`, return `None`.
-    pub fn last_dominator<B>(&self, a: Ebb, b: B, layout: &Layout) -> Option<Inst>
+    pub fn last_dominator<B>(&self, a: Block, b: B, layout: &Layout) -> Option<Inst>
     where
         B: Into<ExpandedProgramPoint>,
     {
-        let (mut ebb_b, mut inst_b) = match b.into() {
-            ExpandedProgramPoint::Ebb(ebb) => (ebb, None),
+        let (mut block_b, mut inst_b) = match b.into() {
+            ExpandedProgramPoint::Block(block) => (block, None),
             ExpandedProgramPoint::Inst(inst) => (
-                layout.inst_ebb(inst).expect("Instruction not in layout."),
+                layout.inst_block(inst).expect("Instruction not in layout."),
                 Some(inst),
             ),
         };
@@ -152,15 +154,15 @@

         // Run a finger up the dominator tree from b until we see a.
         // Do nothing if b is unreachable.
-        while rpo_a < self.nodes[ebb_b].rpo_number {
-            let idom = match self.idom(ebb_b) {
+        while rpo_a < self.nodes[block_b].rpo_number {
+            let idom = match self.idom(block_b) {
                 Some(idom) => idom,
                 None => return None, // a is unreachable, so we climbed past the entry
             };
-            ebb_b = layout.inst_ebb(idom).expect("Dominator got removed.");
+            block_b = layout.inst_block(idom).expect("Dominator got removed.");
             inst_b = Some(idom);
         }
-        if a == ebb_b {
+        if a == block_b {
             inst_b
         } else {
             None
@@ -172,25 +174,25 @@ impl DominatorTree {
     /// Both basic blocks are assumed to be reachable.
     pub fn common_dominator(
         &self,
-        mut a: BasicBlock,
-        mut b: BasicBlock,
+        mut a: BlockPredecessor,
+        mut b: BlockPredecessor,
         layout: &Layout,
-    ) -> BasicBlock {
+    ) -> BlockPredecessor {
         loop {
-            match self.rpo_cmp_ebb(a.ebb, b.ebb) {
+            match self.rpo_cmp_block(a.block, b.block) {
                 Ordering::Less => {
                     // `a` comes before `b` in the RPO. Move `b` up.
-                    let idom = self.nodes[b.ebb].idom.expect("Unreachable basic block?");
-                    b = BasicBlock::new(
-                        layout.inst_ebb(idom).expect("Dangling idom instruction"),
+                    let idom = self.nodes[b.block].idom.expect("Unreachable basic block?");
+                    b = BlockPredecessor::new(
+                        layout.inst_block(idom).expect("Dangling idom instruction"),
                         idom,
                     );
                 }
                 Ordering::Greater => {
                     // `b` comes before `a` in the RPO. Move `a` up.
-                    let idom = self.nodes[a.ebb].idom.expect("Unreachable basic block?");
-                    a = BasicBlock::new(
-                        layout.inst_ebb(idom).expect("Dangling idom instruction"),
+                    let idom = self.nodes[a.block].idom.expect("Unreachable basic block?");
+                    a = BlockPredecessor::new(
+                        layout.inst_block(idom).expect("Dangling idom instruction"),
                         idom,
                     );
                 }
@@ -199,11 +201,11 @@ impl DominatorTree {
         }

         debug_assert_eq!(
-            a.ebb, b.ebb,
+            a.block, b.block,
             "Unreachable block passed to common_dominator?"
         );

-        // We're in the same EBB. The common dominator is the earlier instruction.
+        // We're in the same block. The common dominator is the earlier instruction.
         if layout.cmp(a.inst, b.inst) == Ordering::Less {
             a
         } else {
@@ -226,10 +228,10 @@ impl DominatorTree {

     /// Allocate and compute a dominator tree.
     pub fn with_function(func: &Function, cfg: &ControlFlowGraph) -> Self {
-        let ebb_capacity = func.layout.ebb_capacity();
+        let block_capacity = func.layout.block_capacity();
         let mut domtree = Self {
-            nodes: SecondaryMap::with_capacity(ebb_capacity),
-            postorder: Vec::with_capacity(ebb_capacity),
+            nodes: SecondaryMap::with_capacity(block_capacity),
+            postorder: Vec::with_capacity(block_capacity),
             stack: Vec::new(),
             valid: false,
         };
@@ -266,13 +268,13 @@ impl DominatorTree {

     /// Reset all internal data structures and compute a post-order of the control flow graph.
     ///
-    /// This leaves `rpo_number == 1` for all reachable EBBs, 0 for unreachable ones.
+    /// This leaves `rpo_number == 1` for all reachable blocks, 0 for unreachable ones.
     fn compute_postorder(&mut self, func: &Function) {
         self.clear();
-        self.nodes.resize(func.dfg.num_ebbs());
+        self.nodes.resize(func.dfg.num_blocks());

         // This algorithm is a depth first traversal (DFT) of the control flow graph, computing a
-        // post-order of the EBBs that are reachable form the entry block. A DFT post-order is not
+        // post-order of the blocks that are reachable from the entry block. A DFT post-order is not
         // unique. The specific order we get is controlled by two factors:
         //
         // 1. The order each node's children are visited, and
@@ -280,76 +282,76 @@
         //
         // There are two ways of viewing the CFG as a graph:
         //
-        // 1. Each EBB is a node, with outgoing edges for all the branches in the EBB.
+        // 1. Each block is a node, with outgoing edges for all the branches in the block.
         // 2. Each basic block is a node, with outgoing edges for the single branch at the end of
-        //    the BB. (An EBB is a linear sequence of basic blocks).
+        //    the BB. (A block is a linear sequence of basic blocks).
         //
-        // The first graph is a contraction of the second one. We want to compute an EBB post-order
+        // The first graph is a contraction of the second one. We want to compute a block post-order
         // that is compatible both graph interpretations. That is, if you compute a BB post-order
-        // and then remove those BBs that do not correspond to EBB headers, you get a post-order of
-        // the EBB graph.
+        // and then remove those BBs that do not correspond to block headers, you get a post-order of
+        // the block graph.
         //
         // Node child order:
        //
         //     In the BB graph, we always go down the fall-through path first and follow the branch
         //     destination second.
         //
-        //     In the EBB graph, this is equivalent to visiting EBB successors in a bottom-up
-        //     order, starting from the destination of the EBB's terminating jump, ending at the
-        //     destination of the first branch in the EBB.
+        //     In the block graph, this is equivalent to visiting block successors in a bottom-up
+        //     order, starting from the destination of the block's terminating jump, ending at the
+        //     destination of the first branch in the block.
         //
         // Edge pruning:
         //
-        //     In the BB graph, we keep an edge to an EBB the first time we visit the *source* side
-        //     of the edge. Any subsequent edges to the same EBB are pruned.
+        //     In the BB graph, we keep an edge to a block the first time we visit the *source* side
+        //     of the edge. Any subsequent edges to the same block are pruned.
         //
-        //     The equivalent tree is reached in the EBB graph by keeping the first edge to an EBB
+        //     The equivalent tree is reached in the block graph by keeping the first edge to a block
         //     in a top-down traversal of the successors. (And then visiting edges in a bottom-up
         //     order).
         //
         // This pruning method makes it possible to compute the DFT without storing lots of
-        // information about the progress through an EBB.
+        // information about the progress through a block.

         // During this algorithm only, use `rpo_number` to hold the following state:
         //
-        // 0:    EBB has not yet been reached in the pre-order.
-        // SEEN: EBB has been pushed on the stack but successors not yet pushed.
+        // 0:    block has not yet been reached in the pre-order.
+        // SEEN: block has been pushed on the stack but successors not yet pushed.
         // DONE: Successors pushed.

         match func.layout.entry_block() {
-            Some(ebb) => {
-                self.stack.push(ebb);
-                self.nodes[ebb].rpo_number = SEEN;
+            Some(block) => {
+                self.stack.push(block);
+                self.nodes[block].rpo_number = SEEN;
             }
             None => return,
         }

-        while let Some(ebb) = self.stack.pop() {
-            match self.nodes[ebb].rpo_number {
+        while let Some(block) = self.stack.pop() {
+            match self.nodes[block].rpo_number {
                 SEEN => {
-                    // This is the first time we pop the EBB, so we need to scan its successors and
+                    // This is the first time we pop the block, so we need to scan its successors and
                     // then revisit it.
-                    self.nodes[ebb].rpo_number = DONE;
-                    self.stack.push(ebb);
-                    self.push_successors(func, ebb);
+                    self.nodes[block].rpo_number = DONE;
+                    self.stack.push(block);
+                    self.push_successors(func, block);
                 }
                 DONE => {
-                    // This is the second time we pop the EBB, so all successors have been
+                    // This is the second time we pop the block, so all successors have been
                     // processed.
-                    self.postorder.push(ebb);
+                    self.postorder.push(block);
                 }
                 _ => unreachable!(),
             }
         }
     }

-    /// Push `ebb` successors onto `self.stack`, filtering out those that have already been seen.
+    /// Push `block` successors onto `self.stack`, filtering out those that have already been seen.
     ///
     /// The successors are pushed in program order which is important to get a split-invariant
-    /// post-order. Split-invariant means that if an EBB is split in two, we get the same
-    /// post-order except for the insertion of the new EBB header at the split point.
-    fn push_successors(&mut self, func: &Function, ebb: Ebb) {
-        for inst in func.layout.ebb_insts(ebb) {
+    /// post-order. Split-invariant means that if a block is split in two, we get the same
+    /// post-order except for the insertion of the new block header at the split point.
+    fn push_successors(&mut self, func: &Function, block: Block) {
+        for inst in func.layout.block_insts(block) {
             match func.dfg.analyze_branch(inst) {
                 BranchInfo::SingleDest(succ, _) => self.push_if_unseen(succ),
                 BranchInfo::Table(jt, dest) => {
@@ -365,11 +367,11 @@
         }
     }

-    /// Push `ebb` onto `self.stack` if it has not already been seen.
-    fn push_if_unseen(&mut self, ebb: Ebb) {
-        if self.nodes[ebb].rpo_number == 0 {
-            self.nodes[ebb].rpo_number = SEEN;
-            self.stack.push(ebb);
+    /// Push `block` onto `self.stack` if it has not already been seen.
+ fn push_if_unseen(&mut self, block: Block) { + if self.nodes[block].rpo_number == 0 { + self.nodes[block].rpo_number = SEEN; + self.stack.push(block); } } @@ -378,10 +380,10 @@ impl DominatorTree { fn compute_domtree(&mut self, func: &Function, cfg: &ControlFlowGraph) { // During this algorithm, `rpo_number` has the following values: // - // 0: EBB is not reachable. - // 1: EBB is reachable, but has not yet been visited during the first pass. This is set by + // 0: block is not reachable. + // 1: block is reachable, but has not yet been visited during the first pass. This is set by // `compute_postorder`. - // 2+: EBB is reachable and has an assigned RPO number. + // 2+: block is reachable and has an assigned RPO number. // We'll be iterating over a reverse post-order of the CFG, skipping the entry block. let (entry_block, postorder) = match self.postorder.as_slice().split_last() { @@ -392,7 +394,7 @@ impl DominatorTree { // Do a first pass where we assign RPO numbers to all reachable nodes. self.nodes[entry_block].rpo_number = 2 * STRIDE; - for (rpo_idx, &ebb) in postorder.iter().rev().enumerate() { + for (rpo_idx, &block) in postorder.iter().rev().enumerate() { // Update the current node and give it an RPO number. // The entry block got 2, the rest start at 3 by multiples of STRIDE to leave // room for future dominator tree modifications. @@ -402,8 +404,8 @@ impl DominatorTree { // // Due to the nature of the post-order traversal, every node we visit will have at // least one predecessor that has previously been visited during this RPO. - self.nodes[ebb] = DomNode { - idom: self.compute_idom(ebb, cfg, &func.layout).into(), + self.nodes[block] = DomNode { + idom: self.compute_idom(block, cfg, &func.layout).into(), rpo_number: (rpo_idx as u32 + 3) * STRIDE, } } @@ -415,30 +417,30 @@ impl DominatorTree { let mut changed = true; while changed { changed = false; - for &ebb in postorder.iter().rev() { - let idom = self.compute_idom(ebb, cfg, &func.layout).into(); - if self.nodes[ebb].idom != idom { - self.nodes[ebb].idom = idom; + for &block in postorder.iter().rev() { + let idom = self.compute_idom(block, cfg, &func.layout).into(); + if self.nodes[block].idom != idom { + self.nodes[block].idom = idom; changed = true; } } } } - // Compute the immediate dominator for `ebb` using the current `idom` states for the reachable + // Compute the immediate dominator for `block` using the current `idom` states for the reachable // nodes. - fn compute_idom(&self, ebb: Ebb, cfg: &ControlFlowGraph, layout: &Layout) -> Inst { - // Get an iterator with just the reachable, already visited predecessors to `ebb`. + fn compute_idom(&self, block: Block, cfg: &ControlFlowGraph, layout: &Layout) -> Inst { + // Get an iterator with just the reachable, already visited predecessors to `block`. // Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't // been visited yet, 0 for unreachable blocks. let mut reachable_preds = cfg - .pred_iter(ebb) - .filter(|&BasicBlock { ebb: pred, .. }| self.nodes[pred].rpo_number > 1); + .pred_iter(block) + .filter(|&BlockPredecessor { block: pred, .. }| self.nodes[pred].rpo_number > 1); // The RPO must visit at least one predecessor before this node. 
         let mut idom = reachable_preds
             .next()
-            .expect("EBB node must have one reachable predecessor");
+            .expect("block node must have one reachable predecessor");

         for pred in reachable_preds {
             idom = self.common_dominator(idom, pred, layout);
@@ -453,25 +455,25 @@
 /// This data structure is computed from a `DominatorTree` and provides:
 ///
 /// - A forward traversable dominator tree through the `children()` iterator.
-/// - An ordering of EBBs according to a dominator tree pre-order.
-/// - Constant time dominance checks at the EBB granularity.
+/// - An ordering of blocks according to a dominator tree pre-order.
+/// - Constant time dominance checks at the block granularity.
 ///
 /// The information in this auxiliary data structure is not easy to update when the control flow
 /// graph changes, which is why it is kept separate.
 pub struct DominatorTreePreorder {
-    nodes: SecondaryMap<Ebb, ExtraNode>,
+    nodes: SecondaryMap<Block, ExtraNode>,

     // Scratch memory used by `compute_postorder()`.
-    stack: Vec<Ebb>,
+    stack: Vec<Block>,
 }

 #[derive(Default, Clone)]
 struct ExtraNode {
     /// First child node in the domtree.
-    child: PackedOption<Ebb>,
+    child: PackedOption<Block>,

     /// Next sibling node in the domtree. This linked list is ordered according to the CFG RPO.
-    sibling: PackedOption<Ebb>,
+    sibling: PackedOption<Block>,

     /// Sequence number for this node in a pre-order traversal of the dominator tree.
     /// Unreachable blocks have number 0, the entry block is 1.
@@ -501,23 +503,23 @@ impl DominatorTreePreorder {
         //
         // By following the CFG post-order and pushing to the front of the lists, we make sure that
         // sibling lists are ordered according to the CFG reverse post-order.
-        for &ebb in domtree.cfg_postorder() {
-            if let Some(idom_inst) = domtree.idom(ebb) {
-                let idom = layout.pp_ebb(idom_inst);
-                let sib = mem::replace(&mut self.nodes[idom].child, ebb.into());
-                self.nodes[ebb].sibling = sib;
+        for &block in domtree.cfg_postorder() {
+            if let Some(idom_inst) = domtree.idom(block) {
+                let idom = layout.pp_block(idom_inst);
+                let sib = mem::replace(&mut self.nodes[idom].child, block.into());
+                self.nodes[block].sibling = sib;
             } else {
-                // The only EBB without an immediate dominator is the entry.
-                self.stack.push(ebb);
+                // The only block without an immediate dominator is the entry.
+                self.stack.push(block);
             }
         }

         // Step 2. Assign pre-order numbers from a DFS of the dominator tree.
         debug_assert!(self.stack.len() <= 1);
         let mut n = 0;
-        while let Some(ebb) = self.stack.pop() {
+        while let Some(block) = self.stack.pop() {
             n += 1;
-            let node = &mut self.nodes[ebb];
+            let node = &mut self.nodes[block];
             node.pre_number = n;
             node.pre_max = n;
             if let Some(n) = node.sibling.expand() {
@@ -531,29 +533,29 @@ impl DominatorTreePreorder {
         // Step 3. Propagate the `pre_max` numbers up the tree.
         // The CFG post-order is topologically ordered w.r.t. dominance so a node comes after all
         // its dominator tree children.
-        for &ebb in domtree.cfg_postorder() {
-            if let Some(idom_inst) = domtree.idom(ebb) {
-                let idom = layout.pp_ebb(idom_inst);
-                let pre_max = cmp::max(self.nodes[ebb].pre_max, self.nodes[idom].pre_max);
+        for &block in domtree.cfg_postorder() {
+            if let Some(idom_inst) = domtree.idom(block) {
+                let idom = layout.pp_block(idom_inst);
+                let pre_max = cmp::max(self.nodes[block].pre_max, self.nodes[idom].pre_max);
                 self.nodes[idom].pre_max = pre_max;
             }
         }
     }
 }

-/// An iterator that enumerates the direct children of an EBB in the dominator tree.
+/// An iterator that enumerates the direct children of a block in the dominator tree.
 pub struct ChildIter<'a> {
     dtpo: &'a DominatorTreePreorder,
-    next: PackedOption<Ebb>,
+    next: PackedOption<Block>,
 }

 impl<'a> Iterator for ChildIter<'a> {
-    type Item = Ebb;
+    type Item = Block;

-    fn next(&mut self) -> Option<Ebb> {
+    fn next(&mut self) -> Option<Block> {
         let n = self.next.expand();
-        if let Some(ebb) = n {
-            self.next = self.dtpo.nodes[ebb].sibling;
+        if let Some(block) = n {
+            self.next = self.dtpo.nodes[block].sibling;
         }
         n
     }
@@ -561,32 +563,32 @@ impl<'a> Iterator for ChildIter<'a> {

 /// Query interface for the dominator tree pre-order.
 impl DominatorTreePreorder {
-    /// Get an iterator over the direct children of `ebb` in the dominator tree.
+    /// Get an iterator over the direct children of `block` in the dominator tree.
     ///
-    /// These are the EBB's whose immediate dominator is an instruction in `ebb`, ordered according
+    /// These are the blocks whose immediate dominator is an instruction in `block`, ordered according
     /// to the CFG reverse post-order.
-    pub fn children(&self, ebb: Ebb) -> ChildIter {
+    pub fn children(&self, block: Block) -> ChildIter {
         ChildIter {
             dtpo: self,
-            next: self.nodes[ebb].child,
+            next: self.nodes[block].child,
         }
     }

-    /// Fast, constant time dominance check with EBB granularity.
+    /// Fast, constant time dominance check with block granularity.
     ///
     /// This computes the same result as `domtree.dominates(a, b)`, but in guaranteed fast constant
-    /// time. This is less general than the `DominatorTree` method because it only works with EBB
+    /// time. This is less general than the `DominatorTree` method because it only works with block
     /// program points.
     ///
-    /// An EBB is considered to dominate itself.
-    pub fn dominates(&self, a: Ebb, b: Ebb) -> bool {
+    /// A block is considered to dominate itself.
+    pub fn dominates(&self, a: Block, b: Block) -> bool {
         let na = &self.nodes[a];
         let nb = &self.nodes[b];
         na.pre_number <= nb.pre_number && na.pre_max >= nb.pre_max
     }

-    /// Compare two EBBs according to the dominator pre-order.
-    pub fn pre_cmp_ebb(&self, a: Ebb, b: Ebb) -> Ordering {
+    /// Compare two blocks according to the dominator pre-order.
+ pub fn pre_cmp_block(&self, a: Block, b: Block) -> Ordering { self.nodes[a].pre_number.cmp(&self.nodes[b].pre_number) } @@ -601,7 +603,7 @@ impl DominatorTreePreorder { { let a = a.into(); let b = b.into(); - self.pre_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b)) + self.pre_cmp_block(layout.pp_block(a), layout.pp_block(b)) .then(layout.cmp(a, b)) } @@ -643,23 +645,23 @@ mod tests { #[test] fn unreachable_node() { let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); - let v0 = func.dfg.append_ebb_param(ebb0, I32); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let v0 = func.dfg.append_block_param(block0, I32); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.ins().brnz(v0, ebb2, &[]); + cur.insert_block(block0); + cur.ins().brnz(v0, block2, &[]); cur.ins().trap(TrapCode::User(0)); - cur.insert_ebb(ebb1); + cur.insert_block(block1); let v1 = cur.ins().iconst(I32, 1); let v2 = cur.ins().iadd(v0, v1); - cur.ins().jump(ebb0, &[v2]); + cur.ins().jump(block0, &[v2]); - cur.insert_ebb(ebb2); + cur.insert_block(block2); cur.ins().return_(&[v0]); let cfg = ControlFlowGraph::with_function(cur.func); @@ -667,96 +669,99 @@ mod tests { // Fall-through-first, prune-at-source DFT: // - // ebb0 { - // brnz ebb2 { + // block0 { + // brnz block2 { // trap - // ebb2 { + // block2 { // return - // } ebb2 - // } ebb0 - assert_eq!(dt.cfg_postorder(), &[ebb2, ebb0]); + // } block2 + // } block0 + assert_eq!(dt.cfg_postorder(), &[block2, block0]); let v2_def = cur.func.dfg.value_def(v2).unwrap_inst(); - assert!(!dt.dominates(v2_def, ebb0, &cur.func.layout)); - assert!(!dt.dominates(ebb0, v2_def, &cur.func.layout)); + assert!(!dt.dominates(v2_def, block0, &cur.func.layout)); + assert!(!dt.dominates(block0, v2_def, &cur.func.layout)); let mut dtpo = DominatorTreePreorder::new(); dtpo.compute(&dt, &cur.func.layout); - assert!(dtpo.dominates(ebb0, ebb0)); - assert!(!dtpo.dominates(ebb0, ebb1)); - assert!(dtpo.dominates(ebb0, ebb2)); - assert!(!dtpo.dominates(ebb1, ebb0)); - assert!(dtpo.dominates(ebb1, ebb1)); - assert!(!dtpo.dominates(ebb1, ebb2)); - assert!(!dtpo.dominates(ebb2, ebb0)); - assert!(!dtpo.dominates(ebb2, ebb1)); - assert!(dtpo.dominates(ebb2, ebb2)); + assert!(dtpo.dominates(block0, block0)); + assert!(!dtpo.dominates(block0, block1)); + assert!(dtpo.dominates(block0, block2)); + assert!(!dtpo.dominates(block1, block0)); + assert!(dtpo.dominates(block1, block1)); + assert!(!dtpo.dominates(block1, block2)); + assert!(!dtpo.dominates(block2, block0)); + assert!(!dtpo.dominates(block2, block1)); + assert!(dtpo.dominates(block2, block2)); } #[test] fn non_zero_entry_block() { let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); - let ebb3 = func.dfg.make_ebb(); - let cond = func.dfg.append_ebb_param(ebb3, I32); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); + let block3 = func.dfg.make_block(); + let cond = func.dfg.append_block_param(block3, I32); let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb3); - let jmp_ebb3_ebb1 = cur.ins().jump(ebb1, &[]); + cur.insert_block(block3); + let jmp_block3_block1 = cur.ins().jump(block1, &[]); - cur.insert_ebb(ebb1); - let br_ebb1_ebb0 = cur.ins().brnz(cond, ebb0, &[]); - let jmp_ebb1_ebb2 = cur.ins().jump(ebb2, &[]); + 
cur.insert_block(block1); + let br_block1_block0 = cur.ins().brnz(cond, block0, &[]); + let jmp_block1_block2 = cur.ins().jump(block2, &[]); - cur.insert_ebb(ebb2); - cur.ins().jump(ebb0, &[]); + cur.insert_block(block2); + cur.ins().jump(block0, &[]); - cur.insert_ebb(ebb0); + cur.insert_block(block0); let cfg = ControlFlowGraph::with_function(cur.func); let dt = DominatorTree::with_function(cur.func, &cfg); // Fall-through-first, prune-at-source DFT: // - // ebb3 { - // ebb3:jump ebb1 { - // ebb1 { - // ebb1:brnz ebb0 { - // ebb1:jump ebb2 { - // ebb2 { - // ebb2:jump ebb0 (seen) - // } ebb2 - // } ebb1:jump ebb2 - // ebb0 { - // } ebb0 - // } ebb1:brnz ebb0 - // } ebb1 - // } ebb3:jump ebb1 - // } ebb3 + // block3 { + // block3:jump block1 { + // block1 { + // block1:brnz block0 { + // block1:jump block2 { + // block2 { + // block2:jump block0 (seen) + // } block2 + // } block1:jump block2 + // block0 { + // } block0 + // } block1:brnz block0 + // } block1 + // } block3:jump block1 + // } block3 - assert_eq!(dt.cfg_postorder(), &[ebb2, ebb0, ebb1, ebb3]); + assert_eq!(dt.cfg_postorder(), &[block2, block0, block1, block3]); - assert_eq!(cur.func.layout.entry_block().unwrap(), ebb3); - assert_eq!(dt.idom(ebb3), None); - assert_eq!(dt.idom(ebb1).unwrap(), jmp_ebb3_ebb1); - assert_eq!(dt.idom(ebb2).unwrap(), jmp_ebb1_ebb2); - assert_eq!(dt.idom(ebb0).unwrap(), br_ebb1_ebb0); + assert_eq!(cur.func.layout.entry_block().unwrap(), block3); + assert_eq!(dt.idom(block3), None); + assert_eq!(dt.idom(block1).unwrap(), jmp_block3_block1); + assert_eq!(dt.idom(block2).unwrap(), jmp_block1_block2); + assert_eq!(dt.idom(block0).unwrap(), br_block1_block0); - assert!(dt.dominates(br_ebb1_ebb0, br_ebb1_ebb0, &cur.func.layout)); - assert!(!dt.dominates(br_ebb1_ebb0, jmp_ebb3_ebb1, &cur.func.layout)); - assert!(dt.dominates(jmp_ebb3_ebb1, br_ebb1_ebb0, &cur.func.layout)); + assert!(dt.dominates(br_block1_block0, br_block1_block0, &cur.func.layout)); + assert!(!dt.dominates(br_block1_block0, jmp_block3_block1, &cur.func.layout)); + assert!(dt.dominates(jmp_block3_block1, br_block1_block0, &cur.func.layout)); - assert_eq!(dt.rpo_cmp(ebb3, ebb3, &cur.func.layout), Ordering::Equal); - assert_eq!(dt.rpo_cmp(ebb3, ebb1, &cur.func.layout), Ordering::Less); assert_eq!( - dt.rpo_cmp(ebb3, jmp_ebb3_ebb1, &cur.func.layout), + dt.rpo_cmp(block3, block3, &cur.func.layout), + Ordering::Equal + ); + assert_eq!(dt.rpo_cmp(block3, block1, &cur.func.layout), Ordering::Less); + assert_eq!( + dt.rpo_cmp(block3, jmp_block3_block1, &cur.func.layout), Ordering::Less ); assert_eq!( - dt.rpo_cmp(jmp_ebb3_ebb1, jmp_ebb1_ebb2, &cur.func.layout), + dt.rpo_cmp(jmp_block3_block1, jmp_block1_block2, &cur.func.layout), Ordering::Less ); } @@ -764,69 +769,69 @@ mod tests { #[test] fn backwards_layout() { let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - let jmp02 = cur.ins().jump(ebb2, &[]); + cur.insert_block(block0); + let jmp02 = cur.ins().jump(block2, &[]); - cur.insert_ebb(ebb1); + cur.insert_block(block1); let trap = cur.ins().trap(TrapCode::User(5)); - cur.insert_ebb(ebb2); - let jmp21 = cur.ins().jump(ebb1, &[]); + cur.insert_block(block2); + let jmp21 = cur.ins().jump(block1, &[]); let cfg = ControlFlowGraph::with_function(cur.func); let dt = 
DominatorTree::with_function(cur.func, &cfg); - assert_eq!(cur.func.layout.entry_block(), Some(ebb0)); - assert_eq!(dt.idom(ebb0), None); - assert_eq!(dt.idom(ebb1), Some(jmp21)); - assert_eq!(dt.idom(ebb2), Some(jmp02)); + assert_eq!(cur.func.layout.entry_block(), Some(block0)); + assert_eq!(dt.idom(block0), None); + assert_eq!(dt.idom(block1), Some(jmp21)); + assert_eq!(dt.idom(block2), Some(jmp02)); - assert!(dt.dominates(ebb0, ebb0, &cur.func.layout)); - assert!(dt.dominates(ebb0, jmp02, &cur.func.layout)); - assert!(dt.dominates(ebb0, ebb1, &cur.func.layout)); - assert!(dt.dominates(ebb0, trap, &cur.func.layout)); - assert!(dt.dominates(ebb0, ebb2, &cur.func.layout)); - assert!(dt.dominates(ebb0, jmp21, &cur.func.layout)); + assert!(dt.dominates(block0, block0, &cur.func.layout)); + assert!(dt.dominates(block0, jmp02, &cur.func.layout)); + assert!(dt.dominates(block0, block1, &cur.func.layout)); + assert!(dt.dominates(block0, trap, &cur.func.layout)); + assert!(dt.dominates(block0, block2, &cur.func.layout)); + assert!(dt.dominates(block0, jmp21, &cur.func.layout)); - assert!(!dt.dominates(jmp02, ebb0, &cur.func.layout)); + assert!(!dt.dominates(jmp02, block0, &cur.func.layout)); assert!(dt.dominates(jmp02, jmp02, &cur.func.layout)); - assert!(dt.dominates(jmp02, ebb1, &cur.func.layout)); + assert!(dt.dominates(jmp02, block1, &cur.func.layout)); assert!(dt.dominates(jmp02, trap, &cur.func.layout)); - assert!(dt.dominates(jmp02, ebb2, &cur.func.layout)); + assert!(dt.dominates(jmp02, block2, &cur.func.layout)); assert!(dt.dominates(jmp02, jmp21, &cur.func.layout)); - assert!(!dt.dominates(ebb1, ebb0, &cur.func.layout)); - assert!(!dt.dominates(ebb1, jmp02, &cur.func.layout)); - assert!(dt.dominates(ebb1, ebb1, &cur.func.layout)); - assert!(dt.dominates(ebb1, trap, &cur.func.layout)); - assert!(!dt.dominates(ebb1, ebb2, &cur.func.layout)); - assert!(!dt.dominates(ebb1, jmp21, &cur.func.layout)); + assert!(!dt.dominates(block1, block0, &cur.func.layout)); + assert!(!dt.dominates(block1, jmp02, &cur.func.layout)); + assert!(dt.dominates(block1, block1, &cur.func.layout)); + assert!(dt.dominates(block1, trap, &cur.func.layout)); + assert!(!dt.dominates(block1, block2, &cur.func.layout)); + assert!(!dt.dominates(block1, jmp21, &cur.func.layout)); - assert!(!dt.dominates(trap, ebb0, &cur.func.layout)); + assert!(!dt.dominates(trap, block0, &cur.func.layout)); assert!(!dt.dominates(trap, jmp02, &cur.func.layout)); - assert!(!dt.dominates(trap, ebb1, &cur.func.layout)); + assert!(!dt.dominates(trap, block1, &cur.func.layout)); assert!(dt.dominates(trap, trap, &cur.func.layout)); - assert!(!dt.dominates(trap, ebb2, &cur.func.layout)); + assert!(!dt.dominates(trap, block2, &cur.func.layout)); assert!(!dt.dominates(trap, jmp21, &cur.func.layout)); - assert!(!dt.dominates(ebb2, ebb0, &cur.func.layout)); - assert!(!dt.dominates(ebb2, jmp02, &cur.func.layout)); - assert!(dt.dominates(ebb2, ebb1, &cur.func.layout)); - assert!(dt.dominates(ebb2, trap, &cur.func.layout)); - assert!(dt.dominates(ebb2, ebb2, &cur.func.layout)); - assert!(dt.dominates(ebb2, jmp21, &cur.func.layout)); + assert!(!dt.dominates(block2, block0, &cur.func.layout)); + assert!(!dt.dominates(block2, jmp02, &cur.func.layout)); + assert!(dt.dominates(block2, block1, &cur.func.layout)); + assert!(dt.dominates(block2, trap, &cur.func.layout)); + assert!(dt.dominates(block2, block2, &cur.func.layout)); + assert!(dt.dominates(block2, jmp21, &cur.func.layout)); - assert!(!dt.dominates(jmp21, ebb0, &cur.func.layout)); + 
assert!(!dt.dominates(jmp21, block0, &cur.func.layout));
         assert!(!dt.dominates(jmp21, jmp02, &cur.func.layout));
-        assert!(dt.dominates(jmp21, ebb1, &cur.func.layout));
+        assert!(dt.dominates(jmp21, block1, &cur.func.layout));
         assert!(dt.dominates(jmp21, trap, &cur.func.layout));
-        assert!(!dt.dominates(jmp21, ebb2, &cur.func.layout));
+        assert!(!dt.dominates(jmp21, block2, &cur.func.layout));
         assert!(dt.dominates(jmp21, jmp21, &cur.func.layout));
     }
 }
diff --git a/cranelift/codegen/src/flowgraph.rs b/cranelift/codegen/src/flowgraph.rs
index f53cfa6bff..37245da912 100644
--- a/cranelift/codegen/src/flowgraph.rs
+++ b/cranelift/codegen/src/flowgraph.rs
@@ -1,80 +1,80 @@
-//! A control flow graph represented as mappings of extended basic blocks to their predecessors
+//! A control flow graph represented as mappings of basic blocks to their predecessors
 //! and successors.
 //!
-//! Successors are represented as extended basic blocks while predecessors are represented by basic
-//! blocks. Basic blocks are denoted by tuples of EBB and branch/jump instructions. Each
+//! Successors are represented as basic blocks while predecessors are represented by basic
+//! blocks. Basic blocks are denoted by tuples of block and branch/jump instructions. Each
 //! predecessor tuple corresponds to the end of a basic block.
 //!
 //! ```c
-//!     Ebb0:
+//!     Block0:
 //!         ...          ; beginning of basic block
 //!
 //!         ...
 //!
-//!         brz vx, Ebb1 ; end of basic block
+//!         brz vx, Block1 ; end of basic block
 //!
 //!         ...          ; beginning of basic block
 //!
 //!         ...
 //!
-//!         jmp Ebb2     ; end of basic block
+//!         jmp Block2     ; end of basic block
 //! ```
 //!
-//! Here `Ebb1` and `Ebb2` would each have a single predecessor denoted as `(Ebb0, brz)`
-//! and `(Ebb0, jmp Ebb2)` respectively.
+//! Here `Block1` and `Block2` would each have a single predecessor denoted as `(Block0, brz)`
+//! and `(Block0, jmp Block2)` respectively.

 use crate::bforest;
 use crate::entity::SecondaryMap;
 use crate::ir::instructions::BranchInfo;
-use crate::ir::{Ebb, Function, Inst};
+use crate::ir::{Block, Function, Inst};
 use crate::timing;
 use core::mem;

-/// A basic block denoted by its enclosing Ebb and last instruction.
+/// A basic block denoted by its enclosing Block and last instruction.
 #[derive(Debug, PartialEq, Eq)]
-pub struct BasicBlock {
-    /// Enclosing Ebb key.
-    pub ebb: Ebb,
+pub struct BlockPredecessor {
+    /// Enclosing Block key.
+    pub block: Block,

     /// Last instruction in the basic block.
     pub inst: Inst,
 }

-impl BasicBlock {
-    /// Convenient method to construct new BasicBlock.
-    pub fn new(ebb: Ebb, inst: Inst) -> Self {
-        Self { ebb, inst }
+impl BlockPredecessor {
+    /// Convenience method to construct a new BlockPredecessor.
+    pub fn new(block: Block, inst: Inst) -> Self {
+        Self { block, inst }
     }
 }

-/// A container for the successors and predecessors of some Ebb.
+/// A container for the successors and predecessors of some Block.
 #[derive(Clone, Default)]
 struct CFGNode {
-    /// Instructions that can branch or jump to this EBB.
+    /// Instructions that can branch or jump to this block.
     ///
-    /// This maps branch instruction -> predecessor EBB which is redundant since the EBB containing
-    /// the branch instruction is available from the `layout.inst_ebb()` method. We store the
+    /// This maps branch instruction -> predecessor block which is redundant since the block containing
+    /// the branch instruction is available from the `layout.inst_block()` method. We store the
     /// redundant information because:
     ///
-    /// 1. Many `pred_iter()` consumers want the EBB anyway, so it is handily available.
-    /// 2. The `invalidate_ebb_successors()` may be called *after* branches have been removed from
-    ///    their EBB, but we still need to remove them form the old EBB predecessor map.
+    /// 1. Many `pred_iter()` consumers want the block anyway, so it is handily available.
+    /// 2. The `invalidate_block_successors()` may be called *after* branches have been removed from
+    ///    their block, but we still need to remove them from the old block predecessor map.
     ///
-    /// The redundant EBB stored here is always consistent with the CFG successor lists, even after
+    /// The redundant block stored here is always consistent with the CFG successor lists, even after
     /// the IR has been edited.
-    pub predecessors: bforest::Map<Inst, Ebb>,
+    pub predecessors: bforest::Map<Inst, Block>,

-    /// Set of EBBs that are the targets of branches and jumps in this EBB.
-    /// The set is ordered by EBB number, indicated by the `()` comparator type.
-    pub successors: bforest::Set<Ebb>,
+    /// Set of blocks that are the targets of branches and jumps in this block.
+    /// The set is ordered by block number, indicated by the `()` comparator type.
+    pub successors: bforest::Set<Block>,
 }

-/// The Control Flow Graph maintains a mapping of ebbs to their predecessors
+/// The Control Flow Graph maintains a mapping of blocks to their predecessors
 /// and successors where predecessors are basic blocks and successors are
-/// extended basic blocks.
+/// basic blocks.
 pub struct ControlFlowGraph {
-    data: SecondaryMap<Ebb, CFGNode>,
-    pred_forest: bforest::MapForest<Inst, Ebb>,
-    succ_forest: bforest::SetForest<Ebb>,
+    data: SecondaryMap<Block, CFGNode>,
+    pred_forest: bforest::MapForest<Inst, Block>,
+    succ_forest: bforest::SetForest<Block>,
     valid: bool,
 }

@@ -110,27 +110,27 @@ impl ControlFlowGraph {
     pub fn compute(&mut self, func: &Function) {
         let _tt = timing::flowgraph();
         self.clear();
-        self.data.resize(func.dfg.num_ebbs());
+        self.data.resize(func.dfg.num_blocks());

-        for ebb in &func.layout {
-            self.compute_ebb(func, ebb);
+        for block in &func.layout {
+            self.compute_block(func, block);
         }

         self.valid = true;
     }

-    fn compute_ebb(&mut self, func: &Function, ebb: Ebb) {
-        for inst in func.layout.ebb_insts(ebb) {
+    fn compute_block(&mut self, func: &Function, block: Block) {
+        for inst in func.layout.block_insts(block) {
             match func.dfg.analyze_branch(inst) {
                 BranchInfo::SingleDest(dest, _) => {
-                    self.add_edge(ebb, inst, dest);
+                    self.add_edge(block, inst, dest);
                 }
                 BranchInfo::Table(jt, dest) => {
                     if let Some(dest) = dest {
-                        self.add_edge(ebb, inst, dest);
+                        self.add_edge(block, inst, dest);
                     }
                     for dest in func.jump_tables[jt].iter() {
-                        self.add_edge(ebb, inst, *dest);
+                        self.add_edge(block, inst, *dest);
                     }
                 }
                 BranchInfo::NotABranch => {}
@@ -138,32 +138,32 @@ impl ControlFlowGraph {
         }
     }

-    fn invalidate_ebb_successors(&mut self, ebb: Ebb) {
+    fn invalidate_block_successors(&mut self, block: Block) {
         // Temporarily take ownership because we need mutable access to self.data inside the loop.
         // Unfortunately borrowck cannot see that our mut accesses to predecessors don't alias
         // our iteration over successors.
-        let mut successors = mem::replace(&mut self.data[ebb].successors, Default::default());
+        let mut successors = mem::replace(&mut self.data[block].successors, Default::default());
         for succ in successors.iter(&self.succ_forest) {
             self.data[succ]
                 .predecessors
-                .retain(&mut self.pred_forest, |_, &mut e| e != ebb);
+                .retain(&mut self.pred_forest, |_, &mut e| e != block);
         }
         successors.clear(&mut self.succ_forest);
     }

-    /// Recompute the control flow graph of `ebb`.
+    /// Recompute the control flow graph of `block`.
     ///
-    /// This is for use after modifying instructions within a specific EBB. It recomputes all edges
-    /// from `ebb` while leaving edges to `ebb` intact. Its functionality a subset of that of the
+    /// This is for use after modifying instructions within a specific block. It recomputes all edges
+    /// from `block` while leaving edges to `block` intact. Its functionality is a subset of that of the
     /// more expensive `compute`, and should be used when we know we don't need to recompute the CFG
-    /// from scratch, but rather that our changes have been restricted to specific EBBs.
-    pub fn recompute_ebb(&mut self, func: &Function, ebb: Ebb) {
+    /// from scratch, but rather that our changes have been restricted to specific blocks.
+    pub fn recompute_block(&mut self, func: &Function, block: Block) {
         debug_assert!(self.is_valid());
-        self.invalidate_ebb_successors(ebb);
-        self.compute_ebb(func, ebb);
+        self.invalidate_block_successors(block);
+        self.compute_block(func, block);
     }

-    fn add_edge(&mut self, from: Ebb, from_inst: Inst, to: Ebb) {
+    fn add_edge(&mut self, from: Block, from_inst: Inst, to: Block) {
         self.data[from]
             .successors
             .insert(to, &mut self.succ_forest, &());
@@ -172,15 +172,15 @@ impl ControlFlowGraph {
             .insert(from_inst, from, &mut self.pred_forest, &());
     }

-    /// Get an iterator over the CFG predecessors to `ebb`.
-    pub fn pred_iter(&self, ebb: Ebb) -> PredIter {
-        PredIter(self.data[ebb].predecessors.iter(&self.pred_forest))
+    /// Get an iterator over the CFG predecessors to `block`.
+    pub fn pred_iter(&self, block: Block) -> PredIter {
+        PredIter(self.data[block].predecessors.iter(&self.pred_forest))
     }

-    /// Get an iterator over the CFG successors to `ebb`.
-    pub fn succ_iter(&self, ebb: Ebb) -> SuccIter {
+    /// Get an iterator over the CFG successors to `block`.
+    pub fn succ_iter(&self, block: Block) -> SuccIter {
         debug_assert!(self.is_valid());
-        self.data[ebb].successors.iter(&self.succ_forest)
+        self.data[block].successors.iter(&self.succ_forest)
     }

     /// Check if the CFG is in a valid state.
@@ -193,21 +193,21 @@ impl ControlFlowGraph {
     }
 }

-/// An iterator over EBB predecessors. The iterator type is `BasicBlock`.
+/// An iterator over block predecessors. The iterator type is `BlockPredecessor`.
 ///
-/// Each predecessor is an instruction that branches to the EBB.
-pub struct PredIter<'a>(bforest::MapIter<'a, Inst, Ebb>);
+/// Each predecessor is an instruction that branches to the block.
+pub struct PredIter<'a>(bforest::MapIter<'a, Inst, Block>);

 impl<'a> Iterator for PredIter<'a> {
-    type Item = BasicBlock;
+    type Item = BlockPredecessor;

-    fn next(&mut self) -> Option<BasicBlock> {
-        self.0.next().map(|(i, e)| BasicBlock::new(e, i))
+    fn next(&mut self) -> Option<BlockPredecessor> {
+        self.0.next().map(|(i, e)| BlockPredecessor::new(e, i))
     }
 }

-/// An iterator over EBB successors. The iterator type is `Ebb`.
-pub type SuccIter<'a> = bforest::SetIter<'a, Ebb>;
+/// An iterator over block successors. The iterator type is `Block`.
+pub type SuccIter<'a> = bforest::SetIter<'a, Block>;

 #[cfg(test)]
 mod tests {
@@ -225,126 +225,126 @@ mod tests {

     #[test]
     fn no_predecessors() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        let ebb1 = func.dfg.make_ebb();
-        let ebb2 = func.dfg.make_ebb();
-        func.layout.append_ebb(ebb0);
-        func.layout.append_ebb(ebb1);
-        func.layout.append_ebb(ebb2);
+        let block0 = func.dfg.make_block();
+        let block1 = func.dfg.make_block();
+        let block2 = func.dfg.make_block();
+        func.layout.append_block(block0);
+        func.layout.append_block(block1);
+        func.layout.append_block(block2);
         let cfg = ControlFlowGraph::with_function(&func);

-        let mut fun_ebbs = func.layout.ebbs();
-        for ebb in func.layout.ebbs() {
-            assert_eq!(ebb, fun_ebbs.next().unwrap());
-            assert_eq!(cfg.pred_iter(ebb).count(), 0);
-            assert_eq!(cfg.succ_iter(ebb).count(), 0);
+        let mut fun_blocks = func.layout.blocks();
+        for block in func.layout.blocks() {
+            assert_eq!(block, fun_blocks.next().unwrap());
+            assert_eq!(cfg.pred_iter(block).count(), 0);
+            assert_eq!(cfg.succ_iter(block).count(), 0);
         }
     }

     #[test]
     fn branches_and_jumps() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        let cond = func.dfg.append_ebb_param(ebb0, types::I32);
-        let ebb1 = func.dfg.make_ebb();
-        let ebb2 = func.dfg.make_ebb();
+        let block0 = func.dfg.make_block();
+        let cond = func.dfg.append_block_param(block0, types::I32);
+        let block1 = func.dfg.make_block();
+        let block2 = func.dfg.make_block();

-        let br_ebb0_ebb2;
-        let br_ebb1_ebb1;
-        let jmp_ebb0_ebb1;
-        let jmp_ebb1_ebb2;
+        let br_block0_block2;
+        let br_block1_block1;
+        let jmp_block0_block1;
+        let jmp_block1_block2;
         {
             let mut cur = FuncCursor::new(&mut func);

-            cur.insert_ebb(ebb0);
-            br_ebb0_ebb2 = cur.ins().brnz(cond, ebb2, &[]);
-            jmp_ebb0_ebb1 = cur.ins().jump(ebb1, &[]);
+            cur.insert_block(block0);
+            br_block0_block2 = cur.ins().brnz(cond, block2, &[]);
+            jmp_block0_block1 = cur.ins().jump(block1, &[]);

-            cur.insert_ebb(ebb1);
-            br_ebb1_ebb1 = cur.ins().brnz(cond, ebb1, &[]);
-            jmp_ebb1_ebb2 = cur.ins().jump(ebb2, &[]);
+            cur.insert_block(block1);
+            br_block1_block1 = cur.ins().brnz(cond, block1, &[]);
+            jmp_block1_block2 = cur.ins().jump(block2, &[]);

-            cur.insert_ebb(ebb2);
+            cur.insert_block(block2);
         }

         let mut cfg = ControlFlowGraph::with_function(&func);

         {
-            let ebb0_predecessors = cfg.pred_iter(ebb0).collect::<Vec<_>>();
-            let ebb1_predecessors = cfg.pred_iter(ebb1).collect::<Vec<_>>();
-            let ebb2_predecessors = cfg.pred_iter(ebb2).collect::<Vec<_>>();
+            let block0_predecessors = cfg.pred_iter(block0).collect::<Vec<_>>();
+            let block1_predecessors = cfg.pred_iter(block1).collect::<Vec<_>>();
+            let block2_predecessors = cfg.pred_iter(block2).collect::<Vec<_>>();

-            let ebb0_successors = cfg.succ_iter(ebb0).collect::<Vec<_>>();
-            let ebb1_successors = cfg.succ_iter(ebb1).collect::<Vec<_>>();
-            let ebb2_successors = cfg.succ_iter(ebb2).collect::<Vec<_>>();
+            let block0_successors = cfg.succ_iter(block0).collect::<Vec<_>>();
+            let block1_successors = cfg.succ_iter(block1).collect::<Vec<_>>();
+            let block2_successors = cfg.succ_iter(block2).collect::<Vec<_>>();

-            assert_eq!(ebb0_predecessors.len(), 0);
-            assert_eq!(ebb1_predecessors.len(), 2);
-            assert_eq!(ebb2_predecessors.len(), 2);
+            assert_eq!(block0_predecessors.len(), 0);
+            assert_eq!(block1_predecessors.len(), 2);
+            assert_eq!(block2_predecessors.len(), 2);

             assert_eq!(
-                ebb1_predecessors.contains(&BasicBlock::new(ebb0, jmp_ebb0_ebb1)),
+                block1_predecessors.contains(&BlockPredecessor::new(block0, jmp_block0_block1)),
                 true
             );
             assert_eq!(
-                ebb1_predecessors.contains(&BasicBlock::new(ebb1, br_ebb1_ebb1)),
+                block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)),
                 true
             );
             assert_eq!(
-                ebb2_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb2)),
+                block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)),
                 true
             );
             assert_eq!(
-                ebb2_predecessors.contains(&BasicBlock::new(ebb1, jmp_ebb1_ebb2)),
+                block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)),
                 true
             );

-            assert_eq!(ebb0_successors, [ebb1, ebb2]);
-            assert_eq!(ebb1_successors, [ebb1, ebb2]);
-            assert_eq!(ebb2_successors, []);
+            assert_eq!(block0_successors, [block1, block2]);
+            assert_eq!(block1_successors, [block1, block2]);
+            assert_eq!(block2_successors, []);
         }

-        // Change some instructions and recompute ebb0
-        func.dfg.replace(br_ebb0_ebb2).brnz(cond, ebb1, &[]);
-        func.dfg.replace(jmp_ebb0_ebb1).return_(&[]);
-        cfg.recompute_ebb(&mut func, ebb0);
-        let br_ebb0_ebb1 = br_ebb0_ebb2;
+        // Change some instructions and recompute block0
+        func.dfg.replace(br_block0_block2).brnz(cond, block1, &[]);
+        func.dfg.replace(jmp_block0_block1).return_(&[]);
+        cfg.recompute_block(&mut func, block0);
+        let br_block0_block1 = br_block0_block2;

         {
-            let ebb0_predecessors = cfg.pred_iter(ebb0).collect::<Vec<_>>();
-            let ebb1_predecessors = cfg.pred_iter(ebb1).collect::<Vec<_>>();
-            let ebb2_predecessors = cfg.pred_iter(ebb2).collect::<Vec<_>>();
+            let block0_predecessors = cfg.pred_iter(block0).collect::<Vec<_>>();
+            let block1_predecessors = cfg.pred_iter(block1).collect::<Vec<_>>();
+            let block2_predecessors = cfg.pred_iter(block2).collect::<Vec<_>>();

-            let ebb0_successors = cfg.succ_iter(ebb0);
-            let ebb1_successors = cfg.succ_iter(ebb1);
-            let ebb2_successors = cfg.succ_iter(ebb2);
+            let block0_successors = cfg.succ_iter(block0);
+            let block1_successors = cfg.succ_iter(block1);
+            let block2_successors = cfg.succ_iter(block2);

-            assert_eq!(ebb0_predecessors.len(), 0);
-            assert_eq!(ebb1_predecessors.len(), 2);
-            assert_eq!(ebb2_predecessors.len(), 1);
+            assert_eq!(block0_predecessors.len(), 0);
+            assert_eq!(block1_predecessors.len(), 2);
+            assert_eq!(block2_predecessors.len(), 1);

             assert_eq!(
-                ebb1_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb1)),
+                block1_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block1)),
                 true
             );
             assert_eq!(
-                ebb1_predecessors.contains(&BasicBlock::new(ebb1, br_ebb1_ebb1)),
+                block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)),
                 true
             );
             assert_eq!(
-                ebb2_predecessors.contains(&BasicBlock::new(ebb0, br_ebb0_ebb2)),
+                block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)),
                 false
             );
             assert_eq!(
-                ebb2_predecessors.contains(&BasicBlock::new(ebb1, jmp_ebb1_ebb2)),
+                block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)),
                 true
             );

-            assert_eq!(ebb0_successors.collect::<Vec<_>>(), [ebb1]);
-            assert_eq!(ebb1_successors.collect::<Vec<_>>(), [ebb1, ebb2]);
-            assert_eq!(ebb2_successors.collect::<Vec<_>>(), []);
+            assert_eq!(block0_successors.collect::<Vec<_>>(), [block1]);
+            assert_eq!(block1_successors.collect::<Vec<_>>(), [block1, block2]);
+            assert_eq!(block2_successors.collect::<Vec<_>>(), []);
         }
     }
 }
diff --git a/cranelift/codegen/src/ir/builder.rs b/cranelift/codegen/src/ir/builder.rs
index 13202870f3..63054928f2 100644
--- a/cranelift/codegen/src/ir/builder.rs
+++ b/cranelift/codegen/src/ir/builder.rs
@@ -223,10 +223,10 @@ mod tests {
     #[test]
     fn types() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        let arg0 = func.dfg.append_ebb_param(ebb0, I32);
+        let block0 = func.dfg.make_block();
+        let arg0 = func.dfg.append_block_param(block0, I32);
         let mut pos = FuncCursor::new(&mut func);
-        pos.insert_ebb(ebb0);
+        pos.insert_block(block0);

         // Explicit types.
         let v0 = pos.ins().iconst(I32, 3);
@@ -244,10 +244,10 @@ mod tests {
     #[test]
     fn reuse_results() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        let arg0 = func.dfg.append_ebb_param(ebb0, I32);
+        let block0 = func.dfg.make_block();
+        let arg0 = func.dfg.append_block_param(block0, I32);
         let mut pos = FuncCursor::new(&mut func);
-        pos.insert_ebb(ebb0);
+        pos.insert_block(block0);

         let v0 = pos.ins().iadd_imm(arg0, 17);
         assert_eq!(pos.func.dfg.value_type(v0), I32);
diff --git a/cranelift/codegen/src/ir/dfg.rs b/cranelift/codegen/src/ir/dfg.rs
index 7d453836f5..479aec1cfb 100644
--- a/cranelift/codegen/src/ir/dfg.rs
+++ b/cranelift/codegen/src/ir/dfg.rs
@@ -1,4 +1,4 @@
-//! Data flow graph tracking Instructions, Values, and EBBs.
+//! Data flow graph tracking Instructions, Values, and blocks.

 use crate::entity::{self, PrimaryMap, SecondaryMap};
 use crate::ir;
@@ -7,7 +7,7 @@ use crate::ir::extfunc::ExtFuncData;
 use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
 use crate::ir::{types, ConstantData, ConstantPool, Immediate};
 use crate::ir::{
-    Ebb, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
+    Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
     ValueListPool,
 };
 use crate::isa::TargetIsa;
@@ -21,18 +21,18 @@ use core::mem;
 use core::ops::{Index, IndexMut};
 use core::u16;

-/// A data flow graph defines all instructions and extended basic blocks in a function as well as
+/// A data flow graph defines all instructions and basic blocks in a function as well as
 /// the data flow dependencies between them. The DFG also tracks values which can be either
-/// instruction results or EBB parameters.
+/// instruction results or block parameters.
 ///
-/// The layout of EBBs in the function and of instructions in each EBB is recorded by the
+/// The layout of blocks in the function and of instructions in each block is recorded by the
 /// `Layout` data structure which forms the other half of the function representation.
 ///
 #[derive(Clone)]
 pub struct DataFlowGraph {
     /// Data about all of the instructions in the function, including opcodes and operands.
     /// The instructions in this map are not in program order. That is tracked by `Layout`, along
-    /// with the EBB containing each instruction.
+    /// with the block containing each instruction.
     insts: PrimaryMap<Inst, InstructionData>,

     /// List of result values for each instruction.
@@ -41,11 +41,11 @@ pub struct DataFlowGraph {
     /// primary `insts` map.
     results: SecondaryMap<Inst, ValueList>,

-    /// Extended basic blocks in the function and their parameters.
+    /// Basic blocks in the function and their parameters.
     ///
     /// This map is not in program order. That is handled by `Layout`, and so is the sequence of
-    /// instructions contained in each EBB.
-    ebbs: PrimaryMap<Ebb, EbbData>,
+    /// instructions contained in each block.
+    blocks: PrimaryMap<Block, BlockData>,

     /// Memory pool of value lists.
     ///
@@ -53,7 +53,7 @@ pub struct DataFlowGraph {
     ///
     /// - Instructions in `insts` that don't have room for their entire argument list inline.
     /// - Instruction result values in `results`.
-    /// - EBB parameters in `ebbs`.
+    /// - Block parameters in `blocks`.
     pub value_lists: ValueListPool,

     /// Primary value table with entries for all values.
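Editor's note: for readers unfamiliar with `PrimaryMap`/`SecondaryMap`, the `DataFlowGraph` fields above follow the entity-map pattern from the `cranelift-entity` crate. Below is a deliberately stripped-down sketch of that pattern, assuming nothing beyond the standard library; the real maps are generic over any entity-reference type and considerably more capable.

```rust
// Stripped-down sketch of the entity-map pattern used by `DataFlowGraph`:
// a primary map allocates the keys, and secondary maps attach extra data
// to those same keys. Illustrative only; not the cranelift-entity types.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Block(u32);

struct PrimaryMap<T> {
    elems: Vec<T>,
}

impl<T> PrimaryMap<T> {
    fn new() -> Self {
        Self { elems: Vec::new() }
    }
    // Allocating an element is what mints a new key.
    fn push(&mut self, data: T) -> Block {
        self.elems.push(data);
        Block(self.elems.len() as u32 - 1)
    }
    fn is_valid(&self, key: Block) -> bool {
        (key.0 as usize) < self.elems.len()
    }
}

// A secondary map is dense storage indexed by the same keys, yielding a
// default value for keys it has not been written to yet.
struct SecondaryMap<T: Clone + Default> {
    elems: Vec<T>,
}

impl<T: Clone + Default> SecondaryMap<T> {
    fn new() -> Self {
        Self { elems: Vec::new() }
    }
    fn get_mut(&mut self, key: Block) -> &mut T {
        let i = key.0 as usize;
        if i >= self.elems.len() {
            self.elems.resize(i + 1, T::default());
        }
        &mut self.elems[i]
    }
}

fn main() {
    let mut blocks: PrimaryMap<&str> = PrimaryMap::new();
    let b0 = blocks.push("entry");
    let b1 = blocks.push("loop");
    assert!(blocks.is_valid(b1));

    // E.g. an RPO number attached to each block, stored outside the primary map.
    let mut rpo: SecondaryMap<u32> = SecondaryMap::new();
    *rpo.get_mut(b1) = 2;
    assert_eq!(*rpo.get_mut(b0), 0); // default for untouched keys
}
```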
@@ -85,7 +85,7 @@ impl DataFlowGraph {
         Self {
             insts: PrimaryMap::new(),
             results: SecondaryMap::new(),
-            ebbs: PrimaryMap::new(),
+            blocks: PrimaryMap::new(),
             value_lists: ValueListPool::new(),
             values: PrimaryMap::new(),
             signatures: PrimaryMap::new(),
@@ -101,7 +101,7 @@ impl DataFlowGraph {
     pub fn clear(&mut self) {
         self.insts.clear();
         self.results.clear();
-        self.ebbs.clear();
+        self.blocks.clear();
         self.value_lists.clear();
         self.values.clear();
         self.signatures.clear();
@@ -125,17 +125,17 @@ impl DataFlowGraph {
         self.insts.is_valid(inst)
     }

-    /// Get the total number of extended basic blocks created in this function, whether they are
+    /// Get the total number of basic blocks created in this function, whether they are
     /// currently inserted in the layout or not.
     ///
     /// This is intended for use with `SecondaryMap::with_capacity`.
-    pub fn num_ebbs(&self) -> usize {
-        self.ebbs.len()
+    pub fn num_blocks(&self) -> usize {
+        self.blocks.len()
     }

-    /// Returns `true` if the given ebb reference is valid.
-    pub fn ebb_is_valid(&self, ebb: Ebb) -> bool {
-        self.ebbs.is_valid(ebb)
+    /// Returns `true` if the given block reference is valid.
+    pub fn block_is_valid(&self, block: Block) -> bool {
+        self.blocks.is_valid(block)
     }

     /// Get the total number of values.
@@ -213,7 +213,7 @@ impl<'a> Iterator for Values<'a> {

 /// Handling values.
 ///
-/// Values are either EBB parameters or instruction results.
+/// Values are either block parameters or instruction results.
 impl DataFlowGraph {
     /// Allocate an extended value entry.
     fn make_value(&mut self, data: ValueData) -> Value {
@@ -243,12 +243,12 @@ impl DataFlowGraph {

     /// Get the definition of a value.
     ///
-    /// This is either the instruction that defined it or the Ebb that has the value as an
+    /// This is either the instruction that defined it or the Block that has the value as a
     /// parameter.
     pub fn value_def(&self, v: Value) -> ValueDef {
         match self.values[v] {
             ValueData::Inst { inst, num, .. } => ValueDef::Result(inst, num as usize),
-            ValueData::Param { ebb, num, .. } => ValueDef::Param(ebb, num as usize),
+            ValueData::Param { block, num, .. } => ValueDef::Param(block, num as usize),
             ValueData::Alias { original, .. } => {
                 // Make sure we only recurse one level. `resolve_aliases` has safeguards to
                 // detect alias loops without overrunning the stack.
@@ -257,7 +257,7 @@ impl DataFlowGraph {
         }
     }

-    /// Determine if `v` is an attached instruction result / EBB parameter.
+    /// Determine if `v` is an attached instruction result / block parameter.
     ///
     /// An attached value can't be attached to something else without first being detached.
     ///
@@ -267,7 +267,7 @@ impl DataFlowGraph {
         use self::ValueData::*;
         match self.values[v] {
             Inst { inst, num, .. } => Some(&v) == self.inst_results(inst).get(num as usize),
-            Param { ebb, num, .. } => Some(&v) == self.ebb_params(ebb).get(num as usize),
+            Param { block, num, .. } => Some(&v) == self.block_params(block).get(num as usize),
             Alias { .. } => false,
         }
     }
@@ -297,7 +297,7 @@ impl DataFlowGraph {
     /// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest`
     /// will behave as if they used that value `src`.
     ///
-    /// The `dest` value can't be attached to an instruction or EBB.
+    /// The `dest` value can't be attached to an instruction or block.
     pub fn change_to_alias(&mut self, dest: Value, src: Value) {
         debug_assert!(!self.value_is_attached(dest));
         // Try to create short alias chains by finding the original source value.
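Editor's note: the value-definition and alias machinery touched above (see `value_def` and the `resolve_aliases` safeguard it mentions) boils down to a small disjoint union plus a bounded alias walk. Below is a hypothetical, self-contained model of that idea; the names and representation are invented, and the real `ValueData` additionally tracks types and uses compact pooled indices.

```rust
// Sketch: a value is either an instruction result, a block parameter, or
// an alias of another value. Alias chains are resolved with a walk whose
// length is bounded by the table size, mirroring the loop safeguard
// mentioned in the comments above.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Value(u32);

#[allow(dead_code)]
enum ValueData {
    Inst { inst: u32, num: u16 },
    Param { block: u32, num: u16 },
    Alias { original: Value },
}

fn resolve_aliases(values: &[ValueData], mut v: Value) -> Value {
    // At most `values.len()` hops: a (buggy) alias cycle cannot loop forever.
    for _ in 0..values.len() {
        match values[v.0 as usize] {
            ValueData::Alias { original } => v = original,
            _ => return v,
        }
    }
    panic!("alias loop detected");
}

fn main() {
    let values = vec![
        ValueData::Param { block: 0, num: 0 },   // v0: block parameter
        ValueData::Inst { inst: 7, num: 0 },     // v1: instruction result
        ValueData::Alias { original: Value(1) }, // v2 -> v1
        ValueData::Alias { original: Value(2) }, // v3 -> v2 -> v1
    ];
    assert_eq!(resolve_aliases(&values, Value(3)), Value(1));
}
```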
@@ -376,8 +376,8 @@ impl DataFlowGraph { pub enum ValueDef { /// Value is the n'th result of an instruction. Result(Inst, usize), - /// Value is the n'th parameter to an EBB. - Param(Ebb, usize), + /// Value is the n'th parameter to an block. + Param(Block, usize), } impl ValueDef { @@ -389,11 +389,11 @@ impl ValueDef { } } - /// Unwrap the EBB there the parameter is defined, or panic. - pub fn unwrap_ebb(&self) -> Ebb { + /// Unwrap the block there the parameter is defined, or panic. + pub fn unwrap_block(&self) -> Block { match *self { - Self::Param(ebb, _) => ebb, - _ => panic!("Value is not an EBB parameter"), + Self::Param(block, _) => block, + _ => panic!("Value is not an block parameter"), } } @@ -419,12 +419,12 @@ enum ValueData { /// Value is defined by an instruction. Inst { ty: Type, num: u16, inst: Inst }, - /// Value is an EBB parameter. - Param { ty: Type, num: u16, ebb: Ebb }, + /// Value is an block parameter. + Param { ty: Type, num: u16, block: Block }, /// Value is an alias of another value. - /// An alias value can't be linked as an instruction result or EBB parameter. It is used as a - /// placeholder when the original instruction or EBB has been rewritten or modified. + /// An alias value can't be linked as an instruction result or block parameter. It is used as a + /// placeholder when the original instruction or block has been rewritten or modified. Alias { ty: Type, original: Value }, } @@ -760,61 +760,64 @@ impl IndexMut for DataFlowGraph { } } -/// Extended basic blocks. +/// basic blocks. impl DataFlowGraph { /// Create a new basic block. - pub fn make_ebb(&mut self) -> Ebb { - self.ebbs.push(EbbData::new()) + pub fn make_block(&mut self) -> Block { + self.blocks.push(BlockData::new()) } - /// Get the number of parameters on `ebb`. - pub fn num_ebb_params(&self, ebb: Ebb) -> usize { - self.ebbs[ebb].params.len(&self.value_lists) + /// Get the number of parameters on `block`. + pub fn num_block_params(&self, block: Block) -> usize { + self.blocks[block].params.len(&self.value_lists) } - /// Get the parameters on `ebb`. - pub fn ebb_params(&self, ebb: Ebb) -> &[Value] { - self.ebbs[ebb].params.as_slice(&self.value_lists) + /// Get the parameters on `block`. + pub fn block_params(&self, block: Block) -> &[Value] { + self.blocks[block].params.as_slice(&self.value_lists) } - /// Get the types of the parameters on `ebb`. - pub fn ebb_param_types(&self, ebb: Ebb) -> Vec { - self.ebb_params(ebb) + /// Get the types of the parameters on `block`. + pub fn block_param_types(&self, block: Block) -> Vec { + self.block_params(block) .iter() .map(|&v| self.value_type(v)) .collect() } - /// Append a parameter with type `ty` to `ebb`. - pub fn append_ebb_param(&mut self, ebb: Ebb, ty: Type) -> Value { + /// Append a parameter with type `ty` to `block`. + pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value { let param = self.values.next_key(); - let num = self.ebbs[ebb].params.push(param, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB"); + let num = self.blocks[block].params.push(param, &mut self.value_lists); + debug_assert!(num <= u16::MAX as usize, "Too many parameters on block"); self.make_value(ValueData::Param { ty, num: num as u16, - ebb, + block, }) } - /// Removes `val` from `ebb`'s parameters by swapping it with the last parameter on `ebb`. + /// Removes `val` from `block`'s parameters by swapping it with the last parameter on `block`. /// Returns the position of `val` before removal. 
/// /// *Important*: to ensure O(1) deletion, this method swaps the removed parameter with the - /// last `ebb` parameter. This can disrupt all the branch instructions jumping to this - /// `ebb` for which you have to change the branch argument order if necessary. + /// last `block` parameter. This can disrupt all the branch instructions jumping to this + /// `block` for which you have to change the branch argument order if necessary. /// - /// Panics if `val` is not an EBB parameter. - pub fn swap_remove_ebb_param(&mut self, val: Value) -> usize { - let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[val] { - (ebb, num) + /// Panics if `val` is not a block parameter. + pub fn swap_remove_block_param(&mut self, val: Value) -> usize { + let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] { + (block, num) } else { - panic!("{} must be an EBB parameter", val); + panic!("{} must be a block parameter", val); }; - self.ebbs[ebb] + self.blocks[block] .params .swap_remove(num as usize, &mut self.value_lists); - if let Some(last_arg_val) = self.ebbs[ebb].params.get(num as usize, &self.value_lists) { + if let Some(last_arg_val) = self.blocks[block] + .params + .get(num as usize, &self.value_lists) + { // We update the position of the old last arg. if let ValueData::Param { num: ref mut old_num, @@ -823,25 +826,25 @@ impl DataFlowGraph { { *old_num = num; } else { - panic!("{} should be an Ebb parameter", last_arg_val); + panic!("{} should be a block parameter", last_arg_val); } } num as usize } - /// Removes `val` from `ebb`'s parameters by a standard linear time list removal which + /// Removes `val` from `block`'s parameters by a standard linear time list removal which /// preserves ordering. Also updates the values' data. - pub fn remove_ebb_param(&mut self, val: Value) { - let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[val] { - (ebb, num) + pub fn remove_block_param(&mut self, val: Value) { + let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] { + (block, num) } else { - panic!("{} must be an EBB parameter", val); + panic!("{} must be a block parameter", val); }; - self.ebbs[ebb] + self.blocks[block] .params .remove(num as usize, &mut self.value_lists); - for index in num..(self.num_ebb_params(ebb) as u16) { - match self.values[self.ebbs[ebb] + for index in num..(self.num_block_params(block) as u16) { + match self.values[self.blocks[block] .params .get(index as usize, &self.value_lists) .unwrap()] @@ -850,8 +853,8 @@ impl DataFlowGraph { { *num -= 1; } _ => panic!( - "{} must be an EBB parameter", - self.ebbs[ebb] + "{} must be a block parameter", + self.blocks[block] .params .get(index as usize, &self.value_lists) .unwrap() @@ -860,71 +863,73 @@ impl DataFlowGraph { } }
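To make the difference between the two removal methods concrete, here is a short sketch under the same assumptions as the earlier one (in-crate, test-style): `swap_remove_block_param` is O(1) but reorders the remaining parameters, while `remove_block_param` is linear time and keeps their order.

```rust
#[test]
fn param_removal_sketch() {
    let mut dfg = DataFlowGraph::new();
    let block = dfg.make_block();
    let a = dfg.append_block_param(block, types::I32);
    let b = dfg.append_block_param(block, types::I32);
    let c = dfg.append_block_param(block, types::I32);

    // O(1): the last parameter `c` is swapped into `a`'s slot,
    // so branch arguments to this block may need reordering.
    dfg.swap_remove_block_param(a);
    assert_eq!(dfg.block_params(block), &[c, b]);

    // O(n): removing `c` shifts later parameters down, preserving order.
    dfg.remove_block_param(c);
    assert_eq!(dfg.block_params(block), &[b]);
}
```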
- /// Append an existing value to `ebb`'s parameters. + /// Append an existing value to `block`'s parameters. /// /// The appended value can't already be attached to something else. /// - /// In almost all cases, you should be using `append_ebb_param()` instead of this method. - pub fn attach_ebb_param(&mut self, ebb: Ebb, param: Value) { + /// In almost all cases, you should be using `append_block_param()` instead of this method. + pub fn attach_block_param(&mut self, block: Block, param: Value) { debug_assert!(!self.value_is_attached(param)); - let num = self.ebbs[ebb].params.push(param, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB"); + let num = self.blocks[block].params.push(param, &mut self.value_lists); + debug_assert!(num <= u16::MAX as usize, "Too many parameters on block"); let ty = self.value_type(param); self.values[param] = ValueData::Param { ty, num: num as u16, - ebb, + block, }; } - /// Replace an EBB parameter with a new value of type `ty`. + /// Replace a block parameter with a new value of type `ty`. /// - /// The `old_value` must be an attached EBB parameter. It is removed from its place in the list + /// The `old_value` must be an attached block parameter. It is removed from its place in the list /// of parameters and replaced by a new value of type `new_type`. The new value gets the same /// position in the list, and other parameters are not disturbed. /// /// The old value is left detached, so it should probably be changed into something else. /// /// Returns the new value. - pub fn replace_ebb_param(&mut self, old_value: Value, new_type: Type) -> Value { + pub fn replace_block_param(&mut self, old_value: Value, new_type: Type) -> Value { // Create new value identical to the old one except for the type. - let (ebb, num) = if let ValueData::Param { num, ebb, .. } = self.values[old_value] { - (ebb, num) + let (block, num) = if let ValueData::Param { num, block, .. } = self.values[old_value] { + (block, num) } else { - panic!("{} must be an EBB parameter", old_value); + panic!("{} must be a block parameter", old_value); }; let new_arg = self.make_value(ValueData::Param { ty: new_type, num, - ebb, + block, }); - self.ebbs[ebb].params.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg; + self.blocks[block] + .params + .as_mut_slice(&mut self.value_lists)[num as usize] = new_arg; new_arg } - /// Detach all the parameters from `ebb` and return them as a `ValueList`. + /// Detach all the parameters from `block` and return them as a `ValueList`. /// - /// This is a quite low-level operation. Sensible things to do with the detached EBB parameters - /// is to put them back on the same EBB with `attach_ebb_param()` or change them into aliases + /// This is quite a low-level operation. Sensible things to do with the detached block parameters + /// are to put them back on the same block with `attach_block_param()` or change them into aliases /// with `change_to_alias()`. - pub fn detach_ebb_params(&mut self, ebb: Ebb) -> ValueList { - self.ebbs[ebb].params.take() + pub fn detach_block_params(&mut self, block: Block) -> ValueList { + self.blocks[block].params.take() } } -/// Contents of an extended basic block. +/// Contents of a basic block. /// -/// Parameters on an extended basic block are values that dominate everything in the EBB. All -/// branches to this EBB must provide matching arguments, and the arguments to the entry EBB must +/// Parameters on a basic block are values that dominate everything in the block. All /// branches to this block must provide matching arguments, and the arguments to the entry block must /// match the function arguments. #[derive(Clone)] -struct EbbData { - /// List of parameters to this EBB. +struct BlockData { + /// List of parameters to this block.
params: ValueList, } -impl EbbData { +impl BlockData { fn new() -> Self { Self { params: ValueList::new(), @@ -1012,17 +1017,17 @@ impl DataFlowGraph { self.make_inst_results_reusing(inst, ctrl_typevar, reuse.iter().map(|x| Some(*x))) } - /// Similar to `append_ebb_param`, append a parameter with type `ty` to - /// `ebb`, but using value `val`. This is only for use by the parser to + /// Similar to `append_block_param`, append a parameter with type `ty` to + /// `block`, but using value `val`. This is only for use by the parser to /// create parameters with specific values. #[cold] - pub fn append_ebb_param_for_parser(&mut self, ebb: Ebb, ty: Type, val: Value) { - let num = self.ebbs[ebb].params.push(val, &mut self.value_lists); - assert!(num <= u16::MAX as usize, "Too many parameters on EBB"); + pub fn append_block_param_for_parser(&mut self, block: Block, ty: Type, val: Value) { + let num = self.blocks[block].params.push(val, &mut self.value_lists); + assert!(num <= u16::MAX as usize, "Too many parameters on block"); self.values[val] = ValueData::Param { ty, num: num as u16, - ebb, + block, }; } @@ -1165,95 +1170,95 @@ mod tests { } #[test] - fn ebb() { + fn block() { let mut dfg = DataFlowGraph::new(); - let ebb = dfg.make_ebb(); - assert_eq!(ebb.to_string(), "ebb0"); - assert_eq!(dfg.num_ebb_params(ebb), 0); - assert_eq!(dfg.ebb_params(ebb), &[]); - assert!(dfg.detach_ebb_params(ebb).is_empty()); - assert_eq!(dfg.num_ebb_params(ebb), 0); - assert_eq!(dfg.ebb_params(ebb), &[]); + let block = dfg.make_block(); + assert_eq!(block.to_string(), "block0"); + assert_eq!(dfg.num_block_params(block), 0); + assert_eq!(dfg.block_params(block), &[]); + assert!(dfg.detach_block_params(block).is_empty()); + assert_eq!(dfg.num_block_params(block), 0); + assert_eq!(dfg.block_params(block), &[]); - let arg1 = dfg.append_ebb_param(ebb, types::F32); + let arg1 = dfg.append_block_param(block, types::F32); assert_eq!(arg1.to_string(), "v0"); - assert_eq!(dfg.num_ebb_params(ebb), 1); - assert_eq!(dfg.ebb_params(ebb), &[arg1]); + assert_eq!(dfg.num_block_params(block), 1); + assert_eq!(dfg.block_params(block), &[arg1]); - let arg2 = dfg.append_ebb_param(ebb, types::I16); + let arg2 = dfg.append_block_param(block, types::I16); assert_eq!(arg2.to_string(), "v1"); - assert_eq!(dfg.num_ebb_params(ebb), 2); - assert_eq!(dfg.ebb_params(ebb), &[arg1, arg2]); + assert_eq!(dfg.num_block_params(block), 2); + assert_eq!(dfg.block_params(block), &[arg1, arg2]); - assert_eq!(dfg.value_def(arg1), ValueDef::Param(ebb, 0)); - assert_eq!(dfg.value_def(arg2), ValueDef::Param(ebb, 1)); + assert_eq!(dfg.value_def(arg1), ValueDef::Param(block, 0)); + assert_eq!(dfg.value_def(arg2), ValueDef::Param(block, 1)); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(arg2), types::I16); - // Swap the two EBB parameters. - let vlist = dfg.detach_ebb_params(ebb); - assert_eq!(dfg.num_ebb_params(ebb), 0); - assert_eq!(dfg.ebb_params(ebb), &[]); + // Swap the two block parameters. 
+ let vlist = dfg.detach_block_params(block); + assert_eq!(dfg.num_block_params(block), 0); + assert_eq!(dfg.block_params(block), &[]); assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]); - dfg.attach_ebb_param(ebb, arg2); - let arg3 = dfg.append_ebb_param(ebb, types::I32); - dfg.attach_ebb_param(ebb, arg1); - assert_eq!(dfg.ebb_params(ebb), &[arg2, arg3, arg1]); + dfg.attach_block_param(block, arg2); + let arg3 = dfg.append_block_param(block, types::I32); + dfg.attach_block_param(block, arg1); + assert_eq!(dfg.block_params(block), &[arg2, arg3, arg1]); } #[test] - fn replace_ebb_params() { + fn replace_block_params() { let mut dfg = DataFlowGraph::new(); - let ebb = dfg.make_ebb(); - let arg1 = dfg.append_ebb_param(ebb, types::F32); + let block = dfg.make_block(); + let arg1 = dfg.append_block_param(block, types::F32); - let new1 = dfg.replace_ebb_param(arg1, types::I64); + let new1 = dfg.replace_block_param(arg1, types::I64); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new1), types::I64); - assert_eq!(dfg.ebb_params(ebb), &[new1]); + assert_eq!(dfg.block_params(block), &[new1]); - dfg.attach_ebb_param(ebb, arg1); - assert_eq!(dfg.ebb_params(ebb), &[new1, arg1]); + dfg.attach_block_param(block, arg1); + assert_eq!(dfg.block_params(block), &[new1, arg1]); - let new2 = dfg.replace_ebb_param(arg1, types::I8); + let new2 = dfg.replace_block_param(arg1, types::I8); assert_eq!(dfg.value_type(arg1), types::F32); assert_eq!(dfg.value_type(new2), types::I8); - assert_eq!(dfg.ebb_params(ebb), &[new1, new2]); + assert_eq!(dfg.block_params(block), &[new1, new2]); - dfg.attach_ebb_param(ebb, arg1); - assert_eq!(dfg.ebb_params(ebb), &[new1, new2, arg1]); + dfg.attach_block_param(block, arg1); + assert_eq!(dfg.block_params(block), &[new1, new2, arg1]); - let new3 = dfg.replace_ebb_param(new2, types::I16); + let new3 = dfg.replace_block_param(new2, types::I16); assert_eq!(dfg.value_type(new1), types::I64); assert_eq!(dfg.value_type(new2), types::I8); assert_eq!(dfg.value_type(new3), types::I16); - assert_eq!(dfg.ebb_params(ebb), &[new1, new3, arg1]); + assert_eq!(dfg.block_params(block), &[new1, new3, arg1]); } #[test] - fn swap_remove_ebb_params() { + fn swap_remove_block_params() { let mut dfg = DataFlowGraph::new(); - let ebb = dfg.make_ebb(); - let arg1 = dfg.append_ebb_param(ebb, types::F32); - let arg2 = dfg.append_ebb_param(ebb, types::F32); - let arg3 = dfg.append_ebb_param(ebb, types::F32); - assert_eq!(dfg.ebb_params(ebb), &[arg1, arg2, arg3]); + let block = dfg.make_block(); + let arg1 = dfg.append_block_param(block, types::F32); + let arg2 = dfg.append_block_param(block, types::F32); + let arg3 = dfg.append_block_param(block, types::F32); + assert_eq!(dfg.block_params(block), &[arg1, arg2, arg3]); - dfg.swap_remove_ebb_param(arg1); + dfg.swap_remove_block_param(arg1); assert_eq!(dfg.value_is_attached(arg1), false); assert_eq!(dfg.value_is_attached(arg2), true); assert_eq!(dfg.value_is_attached(arg3), true); - assert_eq!(dfg.ebb_params(ebb), &[arg3, arg2]); - dfg.swap_remove_ebb_param(arg2); + assert_eq!(dfg.block_params(block), &[arg3, arg2]); + dfg.swap_remove_block_param(arg2); assert_eq!(dfg.value_is_attached(arg2), false); assert_eq!(dfg.value_is_attached(arg3), true); - assert_eq!(dfg.ebb_params(ebb), &[arg3]); - dfg.swap_remove_ebb_param(arg3); + assert_eq!(dfg.block_params(block), &[arg3]); + dfg.swap_remove_block_param(arg3); assert_eq!(dfg.value_is_attached(arg3), false); - assert_eq!(dfg.ebb_params(ebb), &[]); + 
assert_eq!(dfg.block_params(block), &[]); } #[test] @@ -1261,9 +1266,9 @@ mod tests { use crate::ir::InstBuilder; let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); let mut pos = FuncCursor::new(&mut func); - pos.insert_ebb(ebb0); + pos.insert_block(block0); // Build a little test program. let v1 = pos.ins().iconst(types::I32, 42); @@ -1271,7 +1276,7 @@ // Make sure we can resolve value aliases even when values is empty. assert_eq!(pos.func.dfg.resolve_aliases(v1), v1); - let arg0 = pos.func.dfg.append_ebb_param(ebb0, types::I32); + let arg0 = pos.func.dfg.append_block_param(block0, types::I32); let (s, c) = pos.ins().iadd_ifcout(v1, arg0); let iadd = match pos.func.dfg.value_def(s) { ValueDef::Result(i, 0) => i, diff --git a/cranelift/codegen/src/ir/entities.rs b/cranelift/codegen/src/ir/entities.rs index 6673c71a17..57906ab63a 100644 --- a/cranelift/codegen/src/ir/entities.rs +++ b/cranelift/codegen/src/ir/entities.rs @@ -1,7 +1,7 @@ //! Cranelift IR entity references. //! //! Instructions in Cranelift IR need to reference other entities in the function. This can be other -//! parts of the function like extended basic blocks or stack slots, or it can be external entities +//! parts of the function like basic blocks or stack slots, or it can be external entities //! that are declared in the function preamble in the text format. //! //! These entity references in instruction operands are not implemented as Rust references both @@ -25,20 +25,19 @@ use core::u32; #[cfg(feature = "enable-serde")] use serde::{Deserialize, Serialize}; -/// An opaque reference to an [extended basic -/// block](https://en.wikipedia.org/wiki/Extended_basic_block) in a +/// An opaque reference to a [basic block](https://en.wikipedia.org/wiki/Basic_block) in a /// [`Function`](super::function::Function). /// -/// You can get an `Ebb` using -/// [`FunctionBuilder::create_ebb`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_ebb) +/// You can get a `Block` using +/// [`FunctionBuilder::create_block`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_block) /// /// While the order is stable, it is arbitrary and does not necessarily resemble the layout order. #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Ebb(u32); -entity_impl!(Ebb, "ebb"); +pub struct Block(u32); +entity_impl!(Block, "block"); -impl Ebb { - /// Create a new EBB reference from its number. This corresponds to the `ebbNN` representation. +impl Block { + /// Create a new block reference from its number. This corresponds to the `blockNN` representation. /// /// This method is for use by the parser. pub fn with_number(n: u32) -> Option<Self> { @@ -371,8 +370,8 @@ impl Table { pub enum AnyEntity { /// The whole function. Function, - /// An extended basic block. - Ebb(Ebb), + /// A basic block. + Block(Block), /// An instruction. Inst(Inst), /// An SSA value.
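Because `entity_impl!(Block, "block")` also drives the textual form, the parser-facing constructor and the derived display behave as in this hypothetical snippet (the numbers are arbitrary; `u32::MAX` is the sentinel `entity_impl!` reserves):

```rust
use cranelift_codegen::ir::Block;

fn block_numbering_sketch() {
    // `with_number` builds the `blockNN` references used by the parser.
    let b = Block::with_number(3).expect("small numbers are representable");
    assert_eq!(b.to_string(), "block3");
    // The all-ones value is reserved as a sentinel and is refused.
    assert!(Block::with_number(u32::MAX).is_none());
}
```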
@@ -397,7 +396,7 @@ impl fmt::Display for AnyEntity { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Self::Function => write!(f, "function"), - Self::Ebb(r) => r.fmt(f), + Self::Block(r) => r.fmt(f), Self::Inst(r) => r.fmt(f), Self::Value(r) => r.fmt(f), Self::StackSlot(r) => r.fmt(f), @@ -417,9 +416,9 @@ impl fmt::Debug for AnyEntity { } } -impl From<Ebb> for AnyEntity { - fn from(r: Ebb) -> Self { - Self::Ebb(r) +impl From<Block> for AnyEntity { + fn from(r: Block) -> Self { + Self::Block(r) } } diff --git a/cranelift/codegen/src/ir/extname.rs b/cranelift/codegen/src/ir/extname.rs index c0a9865373..c12a873d26 100644 --- a/cranelift/codegen/src/ir/extname.rs +++ b/cranelift/codegen/src/ir/extname.rs @@ -32,8 +32,8 @@ pub enum ExternalName { /// Arbitrary. index: u32, }, - /// A test case function name of up to 10 ascii characters. This is - /// not intended to be used outside test cases. + /// A test case function name of up to a hardcoded number of ascii + /// characters. This is not intended to be used outside test cases. TestCase { /// How many of the bytes in `ascii` are valid? length: u8, diff --git a/cranelift/codegen/src/ir/function.rs b/cranelift/codegen/src/ir/function.rs index d3ff59690a..77c06964bf 100644 --- a/cranelift/codegen/src/ir/function.rs +++ b/cranelift/codegen/src/ir/function.rs @@ -1,17 +1,17 @@ //! Intermediate representation of a function. //! -//! The `Function` struct defined in this module owns all of its extended basic blocks and +//! The `Function` struct defined in this module owns all of its basic blocks and //! instructions. use crate::binemit::CodeOffset; use crate::entity::{PrimaryMap, SecondaryMap}; use crate::ir; -use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature}; use crate::ir::{ - Ebb, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, HeapData, Inst, JumpTable, + Block, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, HeapData, Inst, JumpTable, JumpTableData, Opcode, SigRef, StackSlot, StackSlotData, Table, TableData, }; -use crate::ir::{EbbOffsets, FrameLayout, InstEncodings, SourceLocs, StackSlots, ValueLocations}; +use crate::ir::{BlockOffsets, FrameLayout, InstEncodings, SourceLocs, StackSlots, ValueLocations}; +use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature}; use crate::ir::{JumpTableOffsets, JumpTables}; use crate::isa::{CallConv, EncInfo, Encoding, Legalize, TargetIsa}; use crate::regalloc::{EntryRegDiversions, RegDiversions}; @@ -50,10 +50,10 @@ pub struct Function { /// Jump tables used in this function. pub jump_tables: JumpTables, - /// Data flow graph containing the primary definition of all instructions, EBBs and values. + /// Data flow graph containing the primary definition of all instructions, blocks and values. pub dfg: DataFlowGraph, - /// Layout of EBBs and instructions in the function body. + /// Layout of blocks and instructions in the function body. pub layout: Layout, /// Encoding recipe and bits for the legal instructions. @@ -69,12 +69,12 @@ pub struct Function { /// ValueLocation. This field records these register-to-register moves as Diversions. pub entry_diversions: EntryRegDiversions, - /// Code offsets of the EBB headers. + /// Code offsets of the block headers. /// /// This information is only transiently available after the `binemit::relax_branches` function /// computes it, and it can easily be recomputed by calling that function. It is not included /// in the textual IR format.
- pub offsets: EbbOffsets, + pub offsets: BlockOffsets, /// Code offsets of Jump Table headers. pub jt_offsets: JumpTableOffsets, @@ -207,10 +207,10 @@ impl Function { let entry = self.layout.entry_block().expect("Function is empty"); self.signature .special_param_index(purpose) - .map(|i| self.dfg.ebb_params(entry)[i]) + .map(|i| self.dfg.block_params(entry)[i]) } - /// Get an iterator over the instructions in `ebb`, including offsets and encoded instruction + /// Get an iterator over the instructions in `block`, including offsets and encoded instruction /// sizes. /// /// The iterator returns `(offset, inst, size)` tuples, where `offset` is the offset in bytes @@ -219,20 +219,20 @@ impl Function { /// /// This function can only be used after the code layout has been computed by the /// `binemit::relax_branches()` function. - pub fn inst_offsets<'a>(&'a self, ebb: Ebb, encinfo: &EncInfo) -> InstOffsetIter<'a> { + pub fn inst_offsets<'a>(&'a self, block: Block, encinfo: &EncInfo) -> InstOffsetIter<'a> { assert!( !self.offsets.is_empty(), "Code layout must be computed first" ); let mut divert = RegDiversions::new(); - divert.at_ebb(&self.entry_diversions, ebb); + divert.at_block(&self.entry_diversions, block); InstOffsetIter { encinfo: encinfo.clone(), func: self, divert, encodings: &self.encodings, - offset: self.offsets[ebb], - iter: self.layout.ebb_insts(ebb), + offset: self.offsets[block], + iter: self.layout.block_insts(block), } } @@ -260,19 +260,19 @@ impl Function { /// Changes the destination of a jump or branch instruction. /// Does nothing if called with a non-jump or non-branch instruction. - pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Ebb) { + pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Block) { match self.dfg[inst].branch_destination_mut() { None => (), Some(inst_dest) => *inst_dest = new_dest, } } - /// Checks that the specified EBB can be encoded as a basic block. + /// Checks that the specified block can be encoded as a basic block. /// /// On error, returns the first invalid instruction and an error message. - pub fn is_ebb_basic(&self, ebb: Ebb) -> Result<(), (Inst, &'static str)> { + pub fn is_block_basic(&self, block: Block) -> Result<(), (Inst, &'static str)> { let dfg = &self.dfg; - let inst_iter = self.layout.ebb_insts(ebb); + let inst_iter = self.layout.block_insts(block); // Ignore all instructions prior to the first branch. let mut inst_iter = inst_iter.skip_while(|&inst| !dfg[inst].opcode().is_branch()); diff --git a/cranelift/codegen/src/ir/instructions.rs b/cranelift/codegen/src/ir/instructions.rs index c194505deb..afe0266fe4 100644 --- a/cranelift/codegen/src/ir/instructions.rs +++ b/cranelift/codegen/src/ir/instructions.rs @@ -13,7 +13,7 @@ use core::str::FromStr; use crate::ir; use crate::ir::types; -use crate::ir::{Ebb, FuncRef, JumpTable, SigRef, Type, Value}; +use crate::ir::{Block, FuncRef, JumpTable, SigRef, Type, Value}; use crate::isa; use crate::bitset::BitSet; @@ -164,7 +164,7 @@ impl Default for VariableArgs { impl InstructionData { /// Return information about the destination of a branch or jump instruction. /// - /// Any instruction that can transfer control to another EBB reveals its possible destinations + /// Any instruction that can transfer control to another block reveals its possible destinations /// here. pub fn analyze_branch<'a>(&'a self, pool: &'a ValueListPool) -> BranchInfo<'a> { match *self { @@ -208,7 +208,7 @@ impl InstructionData { /// branch or jump.
/// /// Multi-destination branches like `br_table` return `None`. - pub fn branch_destination(&self) -> Option<Ebb> { + pub fn branch_destination(&self) -> Option<Block> { match *self { Self::Jump { destination, .. } | Self::Branch { destination, .. } @@ -227,7 +227,7 @@ impl InstructionData { /// single destination branch or jump. /// /// Multi-destination branches like `br_table` return `None`. - pub fn branch_destination_mut(&mut self) -> Option<&mut Ebb> { + pub fn branch_destination_mut(&mut self) -> Option<&mut Block> { match *self { Self::Jump { ref mut destination, @@ -279,15 +279,15 @@ impl InstructionData { /// Information about branch and jump instructions. pub enum BranchInfo<'a> { /// This is not a branch or jump instruction. - /// This instruction will not transfer control to another EBB in the function, but it may still + /// This instruction will not transfer control to another block in the function, but it may still /// affect control flow by returning or trapping. NotABranch, - /// This is a branch or jump to a single destination EBB, possibly taking value arguments. - SingleDest(Ebb, &'a [Value]), + /// This is a branch or jump to a single destination block, possibly taking value arguments. + SingleDest(Block, &'a [Value]), - /// This is a jump table branch which can have many destination EBBs and maybe one default EBB. - Table(JumpTable, Option<Ebb>), + /// This is a jump table branch which can have many destination blocks and maybe one default block. + Table(JumpTable, Option<Block>), } /// Information about call instructions. diff --git a/cranelift/codegen/src/ir/jumptable.rs b/cranelift/codegen/src/ir/jumptable.rs index 372ad09837..a0596728a3 100644 --- a/cranelift/codegen/src/ir/jumptable.rs +++ b/cranelift/codegen/src/ir/jumptable.rs @@ -3,7 +3,7 @@ //! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference. //! The actual table of destinations is stored in a `JumpTableData` struct defined in this module. -use crate::ir::entities::Ebb; +use crate::ir::entities::Block; use alloc::vec::Vec; use core::fmt::{self, Display, Formatter}; use core::slice::{Iter, IterMut}; @@ -14,7 +14,7 @@ use core::slice::{Iter, IterMut}; #[derive(Clone)] pub struct JumpTableData { // Table entries. - table: Vec<Ebb>, + table: Vec<Block>, } impl JumpTableData { @@ -36,32 +36,32 @@ impl JumpTableData { } /// Append a table entry. - pub fn push_entry(&mut self, dest: Ebb) { + pub fn push_entry(&mut self, dest: Block) { self.table.push(dest) } - /// Checks if any of the entries branch to `ebb`. - pub fn branches_to(&self, ebb: Ebb) -> bool { - self.table.iter().any(|target_ebb| *target_ebb == ebb) + /// Checks if any of the entries branch to `block`. + pub fn branches_to(&self, block: Block) -> bool { + self.table.iter().any(|target_block| *target_block == block) } /// Access the whole table as a slice. - pub fn as_slice(&self) -> &[Ebb] { + pub fn as_slice(&self) -> &[Block] { self.table.as_slice() } /// Access the whole table as a mutable slice. - pub fn as_mut_slice(&mut self) -> &mut [Ebb] { + pub fn as_mut_slice(&mut self) -> &mut [Block] { self.table.as_mut_slice() } /// Returns an iterator over the table. - pub fn iter(&self) -> Iter<Ebb> { + pub fn iter(&self) -> Iter<Block> { self.table.iter() }
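Putting the renamed `JumpTableData` surface together, a small sketch, assuming it runs inside the crate next to the tests shown below (the `jump_table [...]` string is the `Display` form those tests check):

```rust
use crate::entity::EntityRef;
use crate::ir::{Block, JumpTableData};

fn jump_table_sketch() {
    let mut jt = JumpTableData::new();
    let b1 = Block::new(1);
    let b2 = Block::new(2);
    jt.push_entry(b1);
    jt.push_entry(b2);
    jt.push_entry(b1);
    // `branches_to` is a linear scan over the entries, not a set lookup.
    assert!(jt.branches_to(b1));
    assert!(!jt.branches_to(Block::new(3)));
    assert_eq!(jt.as_slice(), [b1, b2, b1]);
    assert_eq!(jt.to_string(), "jump_table [block1, block2, block1]");
}
```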
/// Returns an iterator that allows modifying each value. - pub fn iter_mut(&mut self) -> IterMut<Ebb> { + pub fn iter_mut(&mut self) -> IterMut<Block> { self.table.iter_mut() } } @@ -73,8 +73,8 @@ impl Display for JumpTableData { None => (), Some(first) => write!(fmt, "{}", first)?, } - for ebb in self.table.iter().skip(1) { - write!(fmt, ", {}", ebb)?; + for block in self.table.iter().skip(1) { + write!(fmt, ", {}", block)?; } write!(fmt, "]") } @@ -84,7 +84,7 @@ mod tests { use super::JumpTableData; use crate::entity::EntityRef; - use crate::ir::Ebb; + use crate::ir::Block; use alloc::string::ToString; #[test] @@ -102,8 +102,8 @@ mod tests { #[test] fn insert() { - let e1 = Ebb::new(1); - let e2 = Ebb::new(2); + let e1 = Block::new(1); + let e2 = Block::new(2); let mut jt = JumpTableData::new(); @@ -111,7 +111,7 @@ jt.push_entry(e2); jt.push_entry(e1); - assert_eq!(jt.to_string(), "jump_table [ebb1, ebb2, ebb1]"); + assert_eq!(jt.to_string(), "jump_table [block1, block2, block1]"); let v = jt.as_slice(); assert_eq!(v, [e1, e2, e1]); diff --git a/cranelift/codegen/src/ir/layout.rs b/cranelift/codegen/src/ir/layout.rs index b090b8d819..567a92514c 100644 --- a/cranelift/codegen/src/ir/layout.rs +++ b/cranelift/codegen/src/ir/layout.rs @@ -1,82 +1,82 @@ //! Function layout. //! -//! The order of extended basic blocks in a function and the order of instructions in an EBB is +//! The order of basic blocks in a function and the order of instructions in a block is //! determined by the `Layout` data structure defined in this module. use crate::entity::SecondaryMap; use crate::ir::dfg::DataFlowGraph; use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder}; -use crate::ir::{Ebb, Inst}; +use crate::ir::{Block, Inst}; use crate::packed_option::PackedOption; use crate::timing; use core::cmp; use core::iter::{IntoIterator, Iterator}; use log::debug; -/// The `Layout` struct determines the layout of EBBs and instructions in a function. It does not -/// contain definitions of instructions or EBBs, but depends on `Inst` and `Ebb` entity references +/// The `Layout` struct determines the layout of blocks and instructions in a function. It does not +/// contain definitions of instructions or blocks, but depends on `Inst` and `Block` entity references /// being defined elsewhere. /// /// This data structure determines: /// -/// - The order of EBBs in the function. -/// - Which EBB contains a given instruction. -/// - The order of instructions with an EBB. +/// - The order of blocks in the function. +/// - Which block contains a given instruction. +/// - The order of instructions within a block. /// /// While data dependencies are not recorded, instruction ordering does affect control /// dependencies, so part of the semantics of the program are determined by the layout. /// #[derive(Clone)] pub struct Layout { - /// Linked list nodes for the layout order of EBBs Forms a doubly linked list, terminated in + /// Linked list nodes for the layout order of blocks. Forms a doubly linked list, terminated in /// both ends by `None`. - ebbs: SecondaryMap<Ebb, EbbNode>, + blocks: SecondaryMap<Block, BlockNode>, - /// Linked list nodes for the layout order of instructions. Forms a double linked list per EBB, + /// Linked list nodes for the layout order of instructions. Forms a doubly linked list per block, /// terminated in both ends by `None`. insts: SecondaryMap<Inst, InstNode>, - /// First EBB in the layout order, or `None` when no EBBs have been laid out. - first_ebb: Option<Ebb>, + /// First block in the layout order, or `None` when no blocks have been laid out.
+ first_block: Option<Block>, - /// Last EBB in the layout order, or `None` when no EBBs have been laid out. - last_ebb: Option<Ebb>, + /// Last block in the layout order, or `None` when no blocks have been laid out. + last_block: Option<Block>, } impl Layout { /// Create a new empty `Layout`. pub fn new() -> Self { Self { - ebbs: SecondaryMap::new(), + blocks: SecondaryMap::new(), insts: SecondaryMap::new(), - first_ebb: None, - last_ebb: None, + first_block: None, + last_block: None, } } /// Clear the layout. pub fn clear(&mut self) { - self.ebbs.clear(); + self.blocks.clear(); self.insts.clear(); - self.first_ebb = None; - self.last_ebb = None; + self.first_block = None; + self.last_block = None; } - /// Returns the capacity of the `EbbData` map. - pub fn ebb_capacity(&self) -> usize { - self.ebbs.capacity() + /// Returns the capacity of the `BlockData` map. + pub fn block_capacity(&self) -> usize { + self.blocks.capacity() } } /// Sequence numbers. /// -/// All instructions and EBBs are given a sequence number that can be used to quickly determine +/// All instructions and blocks are given a sequence number that can be used to quickly determine /// their relative position in the layout. The sequence numbers are not contiguous, but are assigned /// like line numbers in BASIC: 10, 20, 30, ... /// -/// The EBB sequence numbers are strictly increasing, and so are the instruction sequence numbers -/// within an EBB. The instruction sequence numbers are all between the sequence number of their -/// containing EBB and the following EBB. +/// The block sequence numbers are strictly increasing, and so are the instruction sequence numbers +/// within a block. The instruction sequence numbers are all between the sequence number of their +/// containing block and the following block. /// /// The result is that sequence numbers work like BASIC line numbers for the textual form of the IR. type SequenceNumber = u32; @@ -127,11 +127,11 @@ impl ProgramOrder for Layout { a_seq.cmp(&b_seq) } - fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool { + fn is_block_gap(&self, inst: Inst, block: Block) -> bool { let i = &self.insts[inst]; - let e = &self.ebbs[ebb]; + let e = &self.blocks[block]; - i.next.is_none() && i.ebb == e.prev + i.next.is_none() && i.block == e.prev } } @@ -139,71 +139,71 @@ impl Layout { /// Get the sequence number of a program point that must correspond to an entity in the layout. fn seq<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> SequenceNumber { - // When `PP = Inst` or `PP = Ebb`, we expect this dynamic type check to be optimized out. + // When `PP = Inst` or `PP = Block`, we expect this dynamic type check to be optimized out. match pp.into() { - ExpandedProgramPoint::Ebb(ebb) => self.ebbs[ebb].seq, + ExpandedProgramPoint::Block(block) => self.blocks[block].seq, ExpandedProgramPoint::Inst(inst) => self.insts[inst].seq, } } - /// Get the last sequence number in `ebb`. - fn last_ebb_seq(&self, ebb: Ebb) -> SequenceNumber { - // Get the seq of the last instruction if it exists, otherwise use the EBB header seq. - self.ebbs[ebb] + /// Get the last sequence number in `block`. + fn last_block_seq(&self, block: Block) -> SequenceNumber { + // Get the seq of the last instruction if it exists, otherwise use the block header seq. + self.blocks[block] .last_inst .map(|inst| self.insts[inst].seq) - .unwrap_or(self.ebbs[ebb].seq) + .unwrap_or(self.blocks[block].seq) } - /// Assign a valid sequence number to `ebb` such that the numbers are still monotonic.
This may + /// require renumbering. - fn assign_ebb_seq(&mut self, ebb: Ebb) { - debug_assert!(self.is_ebb_inserted(ebb)); + fn assign_block_seq(&mut self, block: Block) { + debug_assert!(self.is_block_inserted(block)); - // Get the sequence number immediately before `ebb`, or 0. - let prev_seq = self.ebbs[ebb] + // Get the sequence number immediately before `block`, or 0. + let prev_seq = self.blocks[block] .prev - .map(|prev_ebb| self.last_ebb_seq(prev_ebb)) + .map(|prev_block| self.last_block_seq(prev_block)) .unwrap_or(0); - // Get the sequence number immediately following `ebb`. - let next_seq = if let Some(inst) = self.ebbs[ebb].first_inst.expand() { + // Get the sequence number immediately following `block`. + let next_seq = if let Some(inst) = self.blocks[block].first_inst.expand() { self.insts[inst].seq - } else if let Some(next_ebb) = self.ebbs[ebb].next.expand() { - self.ebbs[next_ebb].seq + } else if let Some(next_block) = self.blocks[block].next.expand() { + self.blocks[next_block].seq } else { - // There is nothing after `ebb`. We can just use a major stride. - self.ebbs[ebb].seq = prev_seq + MAJOR_STRIDE; + // There is nothing after `block`. We can just use a major stride. + self.blocks[block].seq = prev_seq + MAJOR_STRIDE; return; }; // Check if there is room between these sequence numbers. if let Some(seq) = midpoint(prev_seq, next_seq) { - self.ebbs[ebb].seq = seq; + self.blocks[block].seq = seq; } else { // No available integers between `prev_seq` and `next_seq`. We have to renumber. - self.renumber_from_ebb(ebb, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT); + self.renumber_from_block(block, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT); } } /// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may /// require renumbering. fn assign_inst_seq(&mut self, inst: Inst) { - let ebb = self - .inst_ebb(inst) + let block = self + .inst_block(inst) .expect("inst must be inserted before assigning a seq"); // Get the sequence number immediately before `inst`. let prev_seq = match self.insts[inst].prev.expand() { Some(prev_inst) => self.insts[prev_inst].seq, - None => self.ebbs[ebb].seq, + None => self.blocks[block].seq, }; // Get the sequence number immediately following `inst`. let next_seq = if let Some(next_inst) = self.insts[inst].next.expand() { self.insts[next_inst].seq - } else if let Some(next_ebb) = self.ebbs[ebb].next.expand() { - self.ebbs[next_ebb].seq + } else if let Some(next_block) = self.blocks[block].next.expand() { + self.blocks[next_block].seq } else { // There is nothing after `inst`. We can just use a major stride. self.insts[inst].seq = prev_seq + MAJOR_STRIDE; @@ -219,7 +219,7 @@ impl Layout { } } - /// Renumber instructions starting from `inst` until the end of the EBB or until numbers catch + /// Renumber instructions starting from `inst` until the end of the block or until numbers catch /// up. /// /// Return `None` if renumbering has caught up and the sequence is monotonic again. Otherwise @@ -260,31 +260,36 @@ } } - /// Renumber starting from `ebb` to `seq` and continuing until the sequence numbers are + /// Renumber starting from `block` to `seq` and continuing until the sequence numbers are /// monotonic again.
- fn renumber_from_ebb(&mut self, ebb: Ebb, first_seq: SequenceNumber, limit: SequenceNumber) { - let mut ebb = ebb; + fn renumber_from_block( + &mut self, + block: Block, + first_seq: SequenceNumber, + limit: SequenceNumber, + ) { + let mut block = block; let mut seq = first_seq; loop { - self.ebbs[ebb].seq = seq; + self.blocks[block].seq = seq; - // Renumber instructions in `ebb`. Stop when the numbers catch up. - if let Some(inst) = self.ebbs[ebb].first_inst.expand() { + // Renumber instructions in `block`. Stop when the numbers catch up. + if let Some(inst) = self.blocks[block].first_inst.expand() { seq = match self.renumber_insts(inst, seq + MINOR_STRIDE, limit) { Some(s) => s, None => return, } } - // Advance to the next EBB. - ebb = match self.ebbs[ebb].next.expand() { + // Advance to the next block. + block = match self.blocks[block].next.expand() { Some(next) => next, None => return, }; // Stop renumbering once the numbers catch up. - if seq < self.ebbs[ebb].seq { + if seq < self.blocks[block].seq { return; } @@ -296,27 +301,27 @@ impl Layout { /// monotonic again. fn renumber_from_inst(&mut self, inst: Inst, first_seq: SequenceNumber, limit: SequenceNumber) { if let Some(seq) = self.renumber_insts(inst, first_seq, limit) { - // Renumbering spills over into next EBB. - if let Some(next_ebb) = self.ebbs[self.inst_ebb(inst).unwrap()].next.expand() { - self.renumber_from_ebb(next_ebb, seq + MINOR_STRIDE, limit); + // Renumbering spills over into next block. + if let Some(next_block) = self.blocks[self.inst_block(inst).unwrap()].next.expand() { + self.renumber_from_block(next_block, seq + MINOR_STRIDE, limit); } } } - /// Renumber all EBBs and instructions in the layout. + /// Renumber all blocks and instructions in the layout. /// /// This doesn't affect the position of anything, but it gives more room in the internal /// sequence numbers for inserting instructions later. fn full_renumber(&mut self) { let _tt = timing::layout_renumber(); let mut seq = 0; - let mut next_ebb = self.first_ebb; - while let Some(ebb) = next_ebb { - self.ebbs[ebb].seq = seq; + let mut next_block = self.first_block; + while let Some(block) = next_block { + self.blocks[block].seq = seq; seq += MAJOR_STRIDE; - next_ebb = self.ebbs[ebb].next.expand(); + next_block = self.blocks[block].next.expand(); - let mut next_inst = self.ebbs[ebb].first_inst.expand(); + let mut next_inst = self.blocks[block].first_inst.expand(); while let Some(inst) = next_inst { self.insts[inst].seq = seq; seq += MAJOR_STRIDE; @@ -327,169 +332,169 @@ } } -/// Methods for laying out EBBs. +/// Methods for laying out blocks. /// -/// An unknown EBB starts out as *not inserted* in the EBB layout. The layout is a linear order of -/// inserted EBBs. Once an EBB has been inserted in the layout, instructions can be added. An EBB +/// An unknown block starts out as *not inserted* in the block layout. The layout is a linear order of +/// inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block /// can only be removed from the layout when it is empty. /// -/// Since every EBB must end with a terminator instruction which cannot fall through, the layout of -/// EBBs do not affect the semantics of the program. +/// Since every block must end with a terminator instruction which cannot fall through, the layout of +/// blocks does not affect the semantics of the program. /// impl Layout { - /// Is `ebb` currently part of the layout?
- pub fn is_ebb_inserted(&self, ebb: Ebb) -> bool { - Some(ebb) == self.first_ebb || self.ebbs[ebb].prev.is_some() + /// Is `block` currently part of the layout? + pub fn is_block_inserted(&self, block: Block) -> bool { + Some(block) == self.first_block || self.blocks[block].prev.is_some() } - /// Insert `ebb` as the last EBB in the layout. - pub fn append_ebb(&mut self, ebb: Ebb) { + /// Insert `block` as the last block in the layout. + pub fn append_block(&mut self, block: Block) { debug_assert!( - !self.is_ebb_inserted(ebb), - "Cannot append EBB that is already in the layout" + !self.is_block_inserted(block), + "Cannot append block that is already in the layout" ); { - let node = &mut self.ebbs[ebb]; + let node = &mut self.blocks[block]; debug_assert!(node.first_inst.is_none() && node.last_inst.is_none()); - node.prev = self.last_ebb.into(); + node.prev = self.last_block.into(); node.next = None.into(); } - if let Some(last) = self.last_ebb { - self.ebbs[last].next = ebb.into(); + if let Some(last) = self.last_block { + self.blocks[last].next = block.into(); } else { - self.first_ebb = Some(ebb); + self.first_block = Some(block); } - self.last_ebb = Some(ebb); - self.assign_ebb_seq(ebb); + self.last_block = Some(block); + self.assign_block_seq(block); } - /// Insert `ebb` in the layout before the existing EBB `before`. - pub fn insert_ebb(&mut self, ebb: Ebb, before: Ebb) { + /// Insert `block` in the layout before the existing block `before`. + pub fn insert_block(&mut self, block: Block, before: Block) { debug_assert!( - !self.is_ebb_inserted(ebb), - "Cannot insert EBB that is already in the layout" + !self.is_block_inserted(block), + "Cannot insert block that is already in the layout" ); debug_assert!( - self.is_ebb_inserted(before), - "EBB Insertion point not in the layout" + self.is_block_inserted(before), + "block insertion point not in the layout" ); - let after = self.ebbs[before].prev; { - let node = &mut self.ebbs[ebb]; + let after = self.blocks[before].prev; { + let node = &mut self.blocks[block]; node.next = before.into(); node.prev = after; } - self.ebbs[before].prev = ebb.into(); + self.blocks[before].prev = block.into(); match after.expand() { - None => self.first_ebb = Some(ebb), - Some(a) => self.ebbs[a].next = ebb.into(), + None => self.first_block = Some(block), + Some(a) => self.blocks[a].next = block.into(), } - self.assign_ebb_seq(ebb); + self.assign_block_seq(block); } - /// Insert `ebb` in the layout *after* the existing EBB `after`. - pub fn insert_ebb_after(&mut self, ebb: Ebb, after: Ebb) { + /// Insert `block` in the layout *after* the existing block `after`.
+ pub fn insert_block_after(&mut self, block: Block, after: Block) { debug_assert!( - !self.is_ebb_inserted(ebb), - "Cannot insert EBB that is already in the layout" + !self.is_block_inserted(block), + "Cannot insert block that is already in the layout" ); debug_assert!( - self.is_ebb_inserted(after), - "EBB Insertion point not in the layout" + self.is_block_inserted(after), + "block insertion point not in the layout" ); - let before = self.ebbs[after].next; { - let node = &mut self.ebbs[ebb]; + let before = self.blocks[after].next; { + let node = &mut self.blocks[block]; node.next = before; node.prev = after.into(); } - self.ebbs[after].next = ebb.into(); + self.blocks[after].next = block.into(); match before.expand() { - None => self.last_ebb = Some(ebb), - Some(b) => self.ebbs[b].prev = ebb.into(), + None => self.last_block = Some(block), + Some(b) => self.blocks[b].prev = block.into(), } - self.assign_ebb_seq(ebb); + self.assign_block_seq(block); } - /// Remove `ebb` from the layout. - pub fn remove_ebb(&mut self, ebb: Ebb) { - debug_assert!(self.is_ebb_inserted(ebb), "EBB not in the layout"); - debug_assert!(self.first_inst(ebb).is_none(), "EBB must be empty."); + /// Remove `block` from the layout. + pub fn remove_block(&mut self, block: Block) { + debug_assert!(self.is_block_inserted(block), "block not in the layout"); + debug_assert!(self.first_inst(block).is_none(), "block must be empty."); - // Clear the `ebb` node and extract links. + // Clear the `block` node and extract links. let prev; let next; { - let n = &mut self.ebbs[ebb]; + let n = &mut self.blocks[block]; prev = n.prev; next = n.next; n.prev = None.into(); n.next = None.into(); } - // Fix up links to `ebb`. + // Fix up links to `block`. match prev.expand() { - None => self.first_ebb = next.expand(), - Some(p) => self.ebbs[p].next = next, + None => self.first_block = next.expand(), + Some(p) => self.blocks[p].next = next, } match next.expand() { - None => self.last_ebb = prev.expand(), - Some(n) => self.ebbs[n].prev = prev, + None => self.last_block = prev.expand(), + Some(n) => self.blocks[n].prev = prev, } } - /// Return an iterator over all EBBs in layout order. - pub fn ebbs(&self) -> Ebbs { - Ebbs { + /// Return an iterator over all blocks in layout order. + pub fn blocks(&self) -> Blocks { + Blocks { layout: self, - next: self.first_ebb, + next: self.first_block, } } /// Get the function's entry block. - /// This is simply the first EBB in the layout order. - pub fn entry_block(&self) -> Option<Ebb> { - self.first_ebb + /// This is simply the first block in the layout order. + pub fn entry_block(&self) -> Option<Block> { + self.first_block } - /// Get the last EBB in the layout. - pub fn last_ebb(&self) -> Option<Ebb> { - self.last_ebb + /// Get the last block in the layout. + pub fn last_block(&self) -> Option<Block> { + self.last_block } - /// Get the block preceding `ebb` in the layout order. - pub fn prev_ebb(&self, ebb: Ebb) -> Option<Ebb> { - self.ebbs[ebb].prev.expand() + /// Get the block preceding `block` in the layout order. + pub fn prev_block(&self, block: Block) -> Option<Block> { + self.blocks[block].prev.expand() } - /// Get the block following `ebb` in the layout order. - pub fn next_ebb(&self, ebb: Ebb) -> Option<Ebb> { - self.ebbs[ebb].next.expand() + /// Get the block following `block` in the layout order.
+ pub fn next_block(&self, block: Block) -> Option<Block> { + self.blocks[block].next.expand() } } #[derive(Clone, Debug, Default)] -struct EbbNode { - prev: PackedOption<Ebb>, - next: PackedOption<Ebb>, +struct BlockNode { + prev: PackedOption<Block>, + next: PackedOption<Block>, first_inst: PackedOption<Inst>, last_inst: PackedOption<Inst>, seq: SequenceNumber, } -/// Iterate over EBBs in layout order. See `Layout::ebbs()`. -pub struct Ebbs<'f> { +/// Iterate over blocks in layout order. See `Layout::blocks()`. +pub struct Blocks<'f> { layout: &'f Layout, - next: Option<Ebb>, + next: Option<Block>, } -impl<'f> Iterator for Ebbs<'f> { - type Item = Ebb; +impl<'f> Iterator for Blocks<'f> { + type Item = Block; - fn next(&mut self) -> Option<Ebb> { + fn next(&mut self) -> Option<Block> { match self.next { - Some(ebb) => { - self.next = self.layout.next_ebb(ebb); - Some(ebb) + Some(block) => { + self.next = self.layout.next_block(block); + Some(block) } None => None, } @@ -498,70 +503,70 @@ /// Use a layout reference in a for loop. impl<'f> IntoIterator for &'f Layout { - type Item = Ebb; - type IntoIter = Ebbs<'f>; + type Item = Block; + type IntoIter = Blocks<'f>; - fn into_iter(self) -> Ebbs<'f> { - self.ebbs() + fn into_iter(self) -> Blocks<'f> { + self.blocks() } } /// Methods for arranging instructions. /// /// An instruction starts out as *not inserted* in the layout. An instruction can be inserted into -/// an EBB at a given position. +/// a block at a given position. impl Layout { - /// Get the EBB containing `inst`, or `None` if `inst` is not inserted in the layout. - pub fn inst_ebb(&self, inst: Inst) -> Option<Ebb> { - self.insts[inst].ebb.into() + /// Get the block containing `inst`, or `None` if `inst` is not inserted in the layout. + pub fn inst_block(&self, inst: Inst) -> Option<Block> { + self.insts[inst].block.into() } - /// Get the EBB containing the program point `pp`. Panic if `pp` is not in the layout. - pub fn pp_ebb<PP>(&self, pp: PP) -> Ebb + /// Get the block containing the program point `pp`. Panic if `pp` is not in the layout. + pub fn pp_block<PP>(&self, pp: PP) -> Block where PP: Into<ExpandedProgramPoint>, { match pp.into() { - ExpandedProgramPoint::Ebb(ebb) => ebb, + ExpandedProgramPoint::Block(block) => block, ExpandedProgramPoint::Inst(inst) => { - self.inst_ebb(inst).expect("Program point not in layout") + self.inst_block(inst).expect("Program point not in layout") } } } - /// Append `inst` to the end of `ebb`. - pub fn append_inst(&mut self, inst: Inst, ebb: Ebb) { - debug_assert_eq!(self.inst_ebb(inst), None); + /// Append `inst` to the end of `block`.
+ pub fn append_inst(&mut self, inst: Inst, block: Block) { + debug_assert_eq!(self.inst_block(inst), None); debug_assert!( - self.is_ebb_inserted(ebb), - "Cannot append instructions to EBB not in layout" + self.is_block_inserted(block), + "Cannot append instructions to block not in layout" ); { - let ebb_node = &mut self.ebbs[ebb]; + let block_node = &mut self.blocks[block]; { let inst_node = &mut self.insts[inst]; - inst_node.ebb = ebb.into(); - inst_node.prev = ebb_node.last_inst; + inst_node.block = block.into(); + inst_node.prev = block_node.last_inst; debug_assert!(inst_node.next.is_none()); } - if ebb_node.first_inst.is_none() { - ebb_node.first_inst = inst.into(); + if block_node.first_inst.is_none() { + block_node.first_inst = inst.into(); } else { - self.insts[ebb_node.last_inst.unwrap()].next = inst.into(); + self.insts[block_node.last_inst.unwrap()].next = inst.into(); } - ebb_node.last_inst = inst.into(); + block_node.last_inst = inst.into(); } self.assign_inst_seq(inst); } - /// Fetch an ebb's first instruction. - pub fn first_inst(&self, ebb: Ebb) -> Option<Inst> { - self.ebbs[ebb].first_inst.into() + /// Fetch a block's first instruction. + pub fn first_inst(&self, block: Block) -> Option<Inst> { + self.blocks[block].first_inst.into() } - /// Fetch an ebb's last instruction. - pub fn last_inst(&self, ebb: Ebb) -> Option<Inst> { - self.ebbs[ebb].last_inst.into() + /// Fetch a block's last instruction. + pub fn last_inst(&self, block: Block) -> Option<Inst> { + self.blocks[block].last_inst.into() } /// Fetch the instruction following `inst`. @@ -574,11 +579,11 @@ impl Layout { self.insts[inst].prev.expand() } - /// Fetch the first instruction in an ebb's terminal branch group. - pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, ebb: Ebb) -> Option<Inst> { + /// Fetch the first instruction in a block's terminal branch group. + pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option<Inst> { // Basic blocks permit at most two terminal branch instructions. // If two, the former is conditional and the latter is unconditional. - let last = self.last_inst(ebb)?; + let last = self.last_inst(block)?; if let Some(prev) = self.prev_inst(last) { if dfg[prev].opcode().is_branch() { return Some(prev); @@ -587,22 +592,22 @@ Some(last) } - /// Insert `inst` before the instruction `before` in the same EBB. + /// Insert `inst` before the instruction `before` in the same block. pub fn insert_inst(&mut self, inst: Inst, before: Inst) { - debug_assert_eq!(self.inst_ebb(inst), None); - let ebb = self - .inst_ebb(before) + debug_assert_eq!(self.inst_block(inst), None); + let block = self + .inst_block(before) .expect("Instruction before insertion point not in the layout"); let after = self.insts[before].prev; { let inst_node = &mut self.insts[inst]; - inst_node.ebb = ebb.into(); + inst_node.block = block.into(); inst_node.next = before.into(); inst_node.prev = after; } self.insts[before].prev = inst.into(); match after.expand() { - None => self.ebbs[ebb].first_inst = inst.into(), + None => self.blocks[block].first_inst = inst.into(), Some(a) => self.insts[a].next = inst.into(), } self.assign_inst_seq(inst); @@ -610,7 +615,7 @@ /// Remove `inst` from the layout. pub fn remove_inst(&mut self, inst: Inst) { - let ebb = self.inst_ebb(inst).expect("Instruction already removed."); + let block = self.inst_block(inst).expect("Instruction already removed."); // Clear the `inst` node and extract links.
let prev; let next; @@ -618,37 +623,37 @@ impl Layout { let n = &mut self.insts[inst]; prev = n.prev; next = n.next; - n.ebb = None.into(); + n.block = None.into(); n.prev = None.into(); n.next = None.into(); } // Fix up links to `inst`. match prev.expand() { - None => self.ebbs[ebb].first_inst = next, + None => self.blocks[block].first_inst = next, Some(p) => self.insts[p].next = next, } match next.expand() { - None => self.ebbs[ebb].last_inst = prev, + None => self.blocks[block].last_inst = prev, Some(n) => self.insts[n].prev = prev, } } - /// Iterate over the instructions in `ebb` in layout order. - pub fn ebb_insts(&self, ebb: Ebb) -> Insts { + /// Iterate over the instructions in `block` in layout order. + pub fn block_insts(&self, block: Block) -> Insts { Insts { layout: self, - head: self.ebbs[ebb].first_inst.into(), - tail: self.ebbs[ebb].last_inst.into(), + head: self.blocks[block].first_inst.into(), + tail: self.blocks[block].last_inst.into(), } } - /// Split the EBB containing `before` in two. + /// Split the block containing `before` in two. /// - /// Insert `new_ebb` after the old EBB and move `before` and the following instructions to - /// `new_ebb`: + /// Insert `new_block` after the old block and move `before` and the following instructions to + /// `new_block`: /// /// ```text - /// old_ebb: + /// old_block: /// i1 /// i2 /// i3 << before @@ -657,69 +662,69 @@ impl Layout { /// becomes: /// /// ```text - /// old_ebb: + /// old_block: /// i1 /// i2 - /// new_ebb: + /// new_block: /// i3 << before /// i4 /// ``` - pub fn split_ebb(&mut self, new_ebb: Ebb, before: Inst) { - let old_ebb = self - .inst_ebb(before) + pub fn split_block(&mut self, new_block: Block, before: Inst) { + let old_block = self + .inst_block(before) .expect("The `before` instruction must be in the layout"); - debug_assert!(!self.is_ebb_inserted(new_ebb)); + debug_assert!(!self.is_block_inserted(new_block)); - // Insert new_ebb after old_ebb. - let next_ebb = self.ebbs[old_ebb].next; - let last_inst = self.ebbs[old_ebb].last_inst; + // Insert new_block after old_block. + let next_block = self.blocks[old_block].next; + let last_inst = self.blocks[old_block].last_inst; { - let node = &mut self.ebbs[new_ebb]; - node.prev = old_ebb.into(); - node.next = next_ebb; + let node = &mut self.blocks[new_block]; + node.prev = old_block.into(); + node.next = next_block; node.first_inst = before.into(); node.last_inst = last_inst; } - self.ebbs[old_ebb].next = new_ebb.into(); + self.blocks[old_block].next = new_block.into(); // Fix backwards link. - if Some(old_ebb) == self.last_ebb { - self.last_ebb = Some(new_ebb); + if Some(old_block) == self.last_block { + self.last_block = Some(new_block); } else { - self.ebbs[next_ebb.unwrap()].prev = new_ebb.into(); + self.blocks[next_block.unwrap()].prev = new_block.into(); } // Disconnect the instruction links. let prev_inst = self.insts[before].prev; self.insts[before].prev = None.into(); - self.ebbs[old_ebb].last_inst = prev_inst; + self.blocks[old_block].last_inst = prev_inst; match prev_inst.expand() { - None => self.ebbs[old_ebb].first_inst = None.into(), + None => self.blocks[old_block].first_inst = None.into(), Some(pi) => self.insts[pi].next = None.into(), } - // Fix the instruction -> ebb pointers. + // Fix the instruction -> block pointers. 
let mut opt_i = Some(before); while let Some(i) = opt_i { - debug_assert_eq!(self.insts[i].ebb.expand(), Some(old_ebb)); - self.insts[i].ebb = new_ebb.into(); + debug_assert_eq!(self.insts[i].block.expand(), Some(old_block)); + self.insts[i].block = new_block.into(); opt_i = self.insts[i].next.into(); } - self.assign_ebb_seq(new_ebb); + self.assign_block_seq(new_block); } } #[derive(Clone, Debug, Default)] struct InstNode { - /// The Ebb containing this instruction, or `None` if the instruction is not yet inserted. - ebb: PackedOption<Ebb>, + /// The Block containing this instruction, or `None` if the instruction is not yet inserted. + block: PackedOption<Block>, prev: PackedOption<Inst>, next: PackedOption<Inst>, seq: SequenceNumber, } -/// Iterate over instructions in an EBB in layout order. See `Layout::ebb_insts()`. +/// Iterate over instructions in a block in layout order. See `Layout::block_insts()`. pub struct Insts<'f> { layout: &'f Layout, head: Option<Inst>, @@ -763,7 +768,7 @@ mod tests { use super::Layout; use crate::cursor::{Cursor, CursorPosition}; use crate::entity::EntityRef; - use crate::ir::{Ebb, Inst, ProgramOrder, SourceLoc}; + use crate::ir::{Block, Inst, ProgramOrder, SourceLoc}; use alloc::vec::Vec; use core::cmp::Ordering; @@ -810,76 +815,76 @@ } } - fn verify(layout: &mut Layout, ebbs: &[(Ebb, &[Inst])]) { - // Check that EBBs are inserted and instructions belong the right places. + fn verify(layout: &mut Layout, blocks: &[(Block, &[Inst])]) { + // Check that blocks are inserted and instructions belong in the right places. // Check forward linkage with iterators. // Check that layout sequence numbers are strictly monotonic. { let mut seq = 0; - let mut ebb_iter = layout.ebbs(); - for &(ebb, insts) in ebbs { - assert!(layout.is_ebb_inserted(ebb)); - assert_eq!(ebb_iter.next(), Some(ebb)); - assert!(layout.ebbs[ebb].seq > seq); - seq = layout.ebbs[ebb].seq; + let mut block_iter = layout.blocks(); + for &(block, insts) in blocks { + assert!(layout.is_block_inserted(block)); + assert_eq!(block_iter.next(), Some(block)); + assert!(layout.blocks[block].seq > seq); + seq = layout.blocks[block].seq; - let mut inst_iter = layout.ebb_insts(ebb); + let mut inst_iter = layout.block_insts(block); for &inst in insts { - assert_eq!(layout.inst_ebb(inst), Some(ebb)); + assert_eq!(layout.inst_block(inst), Some(block)); assert_eq!(inst_iter.next(), Some(inst)); assert!(layout.insts[inst].seq > seq); seq = layout.insts[inst].seq; } assert_eq!(inst_iter.next(), None); } - assert_eq!(ebb_iter.next(), None); + assert_eq!(block_iter.next(), None); } // Check backwards linkage with a cursor.
let mut cur = LayoutCursor::new(layout); - for &(ebb, insts) in ebbs.into_iter().rev() { - assert_eq!(cur.prev_ebb(), Some(ebb)); + for &(block, insts) in blocks.into_iter().rev() { + assert_eq!(cur.prev_block(), Some(block)); for &inst in insts.into_iter().rev() { assert_eq!(cur.prev_inst(), Some(inst)); } assert_eq!(cur.prev_inst(), None); } - assert_eq!(cur.prev_ebb(), None); + assert_eq!(cur.prev_block(), None); } #[test] - fn append_ebb() { + fn append_block() { let mut layout = Layout::new(); - let e0 = Ebb::new(0); - let e1 = Ebb::new(1); - let e2 = Ebb::new(2); + let e0 = Block::new(0); + let e1 = Block::new(1); + let e2 = Block::new(2); { let imm = &layout; - assert!(!imm.is_ebb_inserted(e0)); - assert!(!imm.is_ebb_inserted(e1)); + assert!(!imm.is_block_inserted(e0)); + assert!(!imm.is_block_inserted(e1)); } verify(&mut layout, &[]); - layout.append_ebb(e1); - assert!(!layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(!layout.is_ebb_inserted(e2)); - let v: Vec = layout.ebbs().collect(); + layout.append_block(e1); + assert!(!layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(!layout.is_block_inserted(e2)); + let v: Vec = layout.blocks().collect(); assert_eq!(v, [e1]); - layout.append_ebb(e2); - assert!(!layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(layout.is_ebb_inserted(e2)); - let v: Vec = layout.ebbs().collect(); + layout.append_block(e2); + assert!(!layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(layout.is_block_inserted(e2)); + let v: Vec = layout.blocks().collect(); assert_eq!(v, [e1, e2]); - layout.append_ebb(e0); - assert!(layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(layout.is_ebb_inserted(e2)); - let v: Vec = layout.ebbs().collect(); + layout.append_block(e0); + assert!(layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(layout.is_block_inserted(e2)); + let v: Vec = layout.blocks().collect(); assert_eq!(v, [e1, e2, e0]); { @@ -899,111 +904,111 @@ mod tests { assert_eq!(cur.prev_inst(), None); assert_eq!(cur.position(), CursorPosition::Nowhere); - assert_eq!(cur.next_ebb(), Some(e1)); + assert_eq!(cur.next_block(), Some(e1)); assert_eq!(cur.position(), CursorPosition::Before(e1)); assert_eq!(cur.next_inst(), None); assert_eq!(cur.position(), CursorPosition::After(e1)); assert_eq!(cur.next_inst(), None); assert_eq!(cur.position(), CursorPosition::After(e1)); - assert_eq!(cur.next_ebb(), Some(e2)); + assert_eq!(cur.next_block(), Some(e2)); assert_eq!(cur.prev_inst(), None); assert_eq!(cur.position(), CursorPosition::Before(e2)); - assert_eq!(cur.next_ebb(), Some(e0)); - assert_eq!(cur.next_ebb(), None); + assert_eq!(cur.next_block(), Some(e0)); + assert_eq!(cur.next_block(), None); assert_eq!(cur.position(), CursorPosition::Nowhere); - // Backwards through the EBBs. - assert_eq!(cur.prev_ebb(), Some(e0)); + // Backwards through the blocks. 
+ assert_eq!(cur.prev_block(), Some(e0)); assert_eq!(cur.position(), CursorPosition::After(e0)); - assert_eq!(cur.prev_ebb(), Some(e2)); - assert_eq!(cur.prev_ebb(), Some(e1)); - assert_eq!(cur.prev_ebb(), None); + assert_eq!(cur.prev_block(), Some(e2)); + assert_eq!(cur.prev_block(), Some(e1)); + assert_eq!(cur.prev_block(), None); assert_eq!(cur.position(), CursorPosition::Nowhere); } #[test] - fn insert_ebb() { + fn insert_block() { let mut layout = Layout::new(); - let e0 = Ebb::new(0); - let e1 = Ebb::new(1); - let e2 = Ebb::new(2); + let e0 = Block::new(0); + let e1 = Block::new(1); + let e2 = Block::new(2); { let imm = &layout; - assert!(!imm.is_ebb_inserted(e0)); - assert!(!imm.is_ebb_inserted(e1)); + assert!(!imm.is_block_inserted(e0)); + assert!(!imm.is_block_inserted(e1)); - let v: Vec = layout.ebbs().collect(); + let v: Vec = layout.blocks().collect(); assert_eq!(v, []); } - layout.append_ebb(e1); - assert!(!layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(!layout.is_ebb_inserted(e2)); + layout.append_block(e1); + assert!(!layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(!layout.is_block_inserted(e2)); verify(&mut layout, &[(e1, &[])]); - layout.insert_ebb(e2, e1); - assert!(!layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(layout.is_ebb_inserted(e2)); + layout.insert_block(e2, e1); + assert!(!layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(layout.is_block_inserted(e2)); verify(&mut layout, &[(e2, &[]), (e1, &[])]); - layout.insert_ebb(e0, e1); - assert!(layout.is_ebb_inserted(e0)); - assert!(layout.is_ebb_inserted(e1)); - assert!(layout.is_ebb_inserted(e2)); + layout.insert_block(e0, e1); + assert!(layout.is_block_inserted(e0)); + assert!(layout.is_block_inserted(e1)); + assert!(layout.is_block_inserted(e2)); verify(&mut layout, &[(e2, &[]), (e0, &[]), (e1, &[])]); } #[test] - fn insert_ebb_after() { + fn insert_block_after() { let mut layout = Layout::new(); - let e0 = Ebb::new(0); - let e1 = Ebb::new(1); - let e2 = Ebb::new(2); + let e0 = Block::new(0); + let e1 = Block::new(1); + let e2 = Block::new(2); - layout.append_ebb(e1); - layout.insert_ebb_after(e2, e1); + layout.append_block(e1); + layout.insert_block_after(e2, e1); verify(&mut layout, &[(e1, &[]), (e2, &[])]); - layout.insert_ebb_after(e0, e1); + layout.insert_block_after(e0, e1); verify(&mut layout, &[(e1, &[]), (e0, &[]), (e2, &[])]); } #[test] fn append_inst() { let mut layout = Layout::new(); - let e1 = Ebb::new(1); + let e1 = Block::new(1); - layout.append_ebb(e1); - let v: Vec = layout.ebb_insts(e1).collect(); + layout.append_block(e1); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, []); let i0 = Inst::new(0); let i1 = Inst::new(1); let i2 = Inst::new(2); - assert_eq!(layout.inst_ebb(i0), None); - assert_eq!(layout.inst_ebb(i1), None); - assert_eq!(layout.inst_ebb(i2), None); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), None); + assert_eq!(layout.inst_block(i2), None); layout.append_inst(i1, e1); - assert_eq!(layout.inst_ebb(i0), None); - assert_eq!(layout.inst_ebb(i1), Some(e1)); - assert_eq!(layout.inst_ebb(i2), None); - let v: Vec = layout.ebb_insts(e1).collect(); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), Some(e1)); + assert_eq!(layout.inst_block(i2), None); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, [i1]); layout.append_inst(i2, e1); - assert_eq!(layout.inst_ebb(i0), None); - 
assert_eq!(layout.inst_ebb(i1), Some(e1)); - assert_eq!(layout.inst_ebb(i2), Some(e1)); - let v: Vec = layout.ebb_insts(e1).collect(); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), Some(e1)); + assert_eq!(layout.inst_block(i2), Some(e1)); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, [i1, i2]); // Test double-ended instruction iterator. - let v: Vec = layout.ebb_insts(e1).rev().collect(); + let v: Vec = layout.block_insts(e1).rev().collect(); assert_eq!(v, [i2, i1]); layout.append_inst(i0, e1); @@ -1036,45 +1041,45 @@ mod tests { cur.goto_inst(i2); assert_eq!(cur.remove_inst(), i2); verify(cur.layout, &[(e1, &[i1, i0])]); - assert_eq!(cur.layout.inst_ebb(i2), None); + assert_eq!(cur.layout.inst_block(i2), None); assert_eq!(cur.remove_inst(), i0); verify(cur.layout, &[(e1, &[i1])]); - assert_eq!(cur.layout.inst_ebb(i0), None); + assert_eq!(cur.layout.inst_block(i0), None); assert_eq!(cur.position(), CursorPosition::After(e1)); cur.layout.remove_inst(i1); verify(cur.layout, &[(e1, &[])]); - assert_eq!(cur.layout.inst_ebb(i1), None); + assert_eq!(cur.layout.inst_block(i1), None); } #[test] fn insert_inst() { let mut layout = Layout::new(); - let e1 = Ebb::new(1); + let e1 = Block::new(1); - layout.append_ebb(e1); - let v: Vec = layout.ebb_insts(e1).collect(); + layout.append_block(e1); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, []); let i0 = Inst::new(0); let i1 = Inst::new(1); let i2 = Inst::new(2); - assert_eq!(layout.inst_ebb(i0), None); - assert_eq!(layout.inst_ebb(i1), None); - assert_eq!(layout.inst_ebb(i2), None); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), None); + assert_eq!(layout.inst_block(i2), None); layout.append_inst(i1, e1); - assert_eq!(layout.inst_ebb(i0), None); - assert_eq!(layout.inst_ebb(i1), Some(e1)); - assert_eq!(layout.inst_ebb(i2), None); - let v: Vec = layout.ebb_insts(e1).collect(); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), Some(e1)); + assert_eq!(layout.inst_block(i2), None); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, [i1]); layout.insert_inst(i2, i1); - assert_eq!(layout.inst_ebb(i0), None); - assert_eq!(layout.inst_ebb(i1), Some(e1)); - assert_eq!(layout.inst_ebb(i2), Some(e1)); - let v: Vec = layout.ebb_insts(e1).collect(); + assert_eq!(layout.inst_block(i0), None); + assert_eq!(layout.inst_block(i1), Some(e1)); + assert_eq!(layout.inst_block(i2), Some(e1)); + let v: Vec = layout.block_insts(e1).collect(); assert_eq!(v, [i2, i1]); layout.insert_inst(i0, i1); @@ -1082,16 +1087,16 @@ mod tests { } #[test] - fn multiple_ebbs() { + fn multiple_blocks() { let mut layout = Layout::new(); - let e0 = Ebb::new(0); - let e1 = Ebb::new(1); + let e0 = Block::new(0); + let e1 = Block::new(1); assert_eq!(layout.entry_block(), None); - layout.append_ebb(e0); + layout.append_block(e0); assert_eq!(layout.entry_block(), Some(e0)); - layout.append_ebb(e1); + layout.append_block(e1); assert_eq!(layout.entry_block(), Some(e0)); let i0 = Inst::new(0); @@ -1104,84 +1109,84 @@ mod tests { layout.append_inst(i2, e1); layout.append_inst(i3, e1); - let v0: Vec = layout.ebb_insts(e0).collect(); - let v1: Vec = layout.ebb_insts(e1).collect(); + let v0: Vec = layout.block_insts(e0).collect(); + let v1: Vec = layout.block_insts(e1).collect(); assert_eq!(v0, [i0, i1]); assert_eq!(v1, [i2, i3]); } #[test] - fn split_ebb() { + fn split_block() { let mut layout = Layout::new(); - let e0 = Ebb::new(0); - let e1 = Ebb::new(1); - let e2 = 
Ebb::new(2); + let e0 = Block::new(0); + let e1 = Block::new(1); + let e2 = Block::new(2); let i0 = Inst::new(0); let i1 = Inst::new(1); let i2 = Inst::new(2); let i3 = Inst::new(3); - layout.append_ebb(e0); + layout.append_block(e0); layout.append_inst(i0, e0); - assert_eq!(layout.inst_ebb(i0), Some(e0)); - layout.split_ebb(e1, i0); - assert_eq!(layout.inst_ebb(i0), Some(e1)); + assert_eq!(layout.inst_block(i0), Some(e0)); + layout.split_block(e1, i0); + assert_eq!(layout.inst_block(i0), Some(e1)); { let mut cur = LayoutCursor::new(&mut layout); - assert_eq!(cur.next_ebb(), Some(e0)); + assert_eq!(cur.next_block(), Some(e0)); assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_ebb(), Some(e1)); + assert_eq!(cur.next_block(), Some(e1)); assert_eq!(cur.next_inst(), Some(i0)); assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_ebb(), None); + assert_eq!(cur.next_block(), None); // Check backwards links. - assert_eq!(cur.prev_ebb(), Some(e1)); + assert_eq!(cur.prev_block(), Some(e1)); assert_eq!(cur.prev_inst(), Some(i0)); assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_ebb(), Some(e0)); + assert_eq!(cur.prev_block(), Some(e0)); assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_ebb(), None); + assert_eq!(cur.prev_block(), None); } layout.append_inst(i1, e0); layout.append_inst(i2, e0); layout.append_inst(i3, e0); - layout.split_ebb(e2, i2); + layout.split_block(e2, i2); - assert_eq!(layout.inst_ebb(i0), Some(e1)); - assert_eq!(layout.inst_ebb(i1), Some(e0)); - assert_eq!(layout.inst_ebb(i2), Some(e2)); - assert_eq!(layout.inst_ebb(i3), Some(e2)); + assert_eq!(layout.inst_block(i0), Some(e1)); + assert_eq!(layout.inst_block(i1), Some(e0)); + assert_eq!(layout.inst_block(i2), Some(e2)); + assert_eq!(layout.inst_block(i3), Some(e2)); { let mut cur = LayoutCursor::new(&mut layout); - assert_eq!(cur.next_ebb(), Some(e0)); + assert_eq!(cur.next_block(), Some(e0)); assert_eq!(cur.next_inst(), Some(i1)); assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_ebb(), Some(e2)); + assert_eq!(cur.next_block(), Some(e2)); assert_eq!(cur.next_inst(), Some(i2)); assert_eq!(cur.next_inst(), Some(i3)); assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_ebb(), Some(e1)); + assert_eq!(cur.next_block(), Some(e1)); assert_eq!(cur.next_inst(), Some(i0)); assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_ebb(), None); + assert_eq!(cur.next_block(), None); - assert_eq!(cur.prev_ebb(), Some(e1)); + assert_eq!(cur.prev_block(), Some(e1)); assert_eq!(cur.prev_inst(), Some(i0)); assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_ebb(), Some(e2)); + assert_eq!(cur.prev_block(), Some(e2)); assert_eq!(cur.prev_inst(), Some(i3)); assert_eq!(cur.prev_inst(), Some(i2)); assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_ebb(), Some(e0)); + assert_eq!(cur.prev_block(), Some(e0)); assert_eq!(cur.prev_inst(), Some(i1)); assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_ebb(), None); + assert_eq!(cur.prev_block(), None); } // Check `ProgramOrder`. 
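The strictly monotonic `seq` assertions in `verify` above are what make program-order queries cheap. A simplified model of the idea; `ToyOrder` is invented here, while the real layout stores a `SequenceNumber` on every block header and instruction node:

```rust
use core::cmp::Ordering;

struct ToyOrder {
    seq: Vec<u32>, // seq[pp] = layout-order sequence number of program point pp
}

impl ToyOrder {
    // Program-order comparison reduces to comparing cached integers.
    fn cmp(&self, a: usize, b: usize) -> Ordering {
        self.seq[a].cmp(&self.seq[b])
    }
}

fn main() {
    // Strictly increasing in layout order, but not necessarily dense: gaps
    // let an insertion pick a fresh number without renumbering everything.
    let order = ToyOrder { seq: vec![10, 20, 40, 80] };
    assert_eq!(order.cmp(0, 2), Ordering::Less);
    assert_eq!(order.cmp(3, 1), Ordering::Greater);
}
```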
@@ -1189,9 +1194,9 @@ mod tests { assert_eq!(layout.cmp(e2, i2), Ordering::Less); assert_eq!(layout.cmp(i3, i2), Ordering::Greater); - assert_eq!(layout.is_ebb_gap(i1, e2), true); - assert_eq!(layout.is_ebb_gap(i3, e1), true); - assert_eq!(layout.is_ebb_gap(i1, e1), false); - assert_eq!(layout.is_ebb_gap(i2, e1), false); + assert_eq!(layout.is_block_gap(i1, e2), true); + assert_eq!(layout.is_block_gap(i3, e1), true); + assert_eq!(layout.is_block_gap(i1, e1), false); + assert_eq!(layout.is_block_gap(i2, e1), false); } } diff --git a/cranelift/codegen/src/ir/mod.rs index 096e372db0..3c222ca9f5 100644 --- a/cranelift/codegen/src/ir/mod.rs +++ b/cranelift/codegen/src/ir/mod.rs @@ -33,7 +33,7 @@ pub use crate::ir::builder::{ pub use crate::ir::constant::{ConstantData, ConstantOffset, ConstantPool}; pub use crate::ir::dfg::{DataFlowGraph, ValueDef}; pub use crate::ir::entities::{ - Constant, Ebb, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot, + Block, Constant, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot, Table, Value, }; pub use crate::ir::extfunc::{ @@ -73,8 +73,8 @@ pub type JumpTables = PrimaryMap; /// Map of instruction encodings. pub type InstEncodings = SecondaryMap; -/// Code offsets for EBBs. -pub type EbbOffsets = SecondaryMap; +/// Code offsets for blocks. +pub type BlockOffsets = SecondaryMap; /// Code offsets for Jump Tables. pub type JumpTableOffsets = SecondaryMap; diff --git a/cranelift/codegen/src/ir/progpoint.rs index 4bfa2c39e7..df1a7d14b3 100644 --- a/cranelift/codegen/src/ir/progpoint.rs +++ b/cranelift/codegen/src/ir/progpoint.rs @@ -1,7 +1,7 @@ //! Program points. use crate::entity::EntityRef; -use crate::ir::{Ebb, Inst, ValueDef}; +use crate::ir::{Block, Inst, ValueDef}; use core::cmp; use core::fmt; use core::u32; @@ -10,7 +10,7 @@ use core::u32; /// begin or end. It can be either: /// /// 1. An instruction or -/// 2. An EBB header. +/// 2. A block header. /// /// This corresponds more or less to the lines in the textual form of Cranelift IR. #[derive(PartialEq, Eq, Clone, Copy)] @@ -24,9 +24,9 @@ impl From for ProgramPoint { } } -impl From for ProgramPoint { - fn from(ebb: Ebb) -> Self { - let idx = ebb.index(); +impl From for ProgramPoint { + fn from(block: Block) -> Self { + let idx = block.index(); debug_assert!(idx < (u32::MAX / 2) as usize); Self((idx * 2 + 1) as u32) } @@ -36,7 +36,7 @@ impl From for ProgramPoint { fn from(def: ValueDef) -> Self { match def { ValueDef::Result(inst, _) => inst.into(), - ValueDef::Param(ebb, _) => ebb.into(), + ValueDef::Param(block, _) => block.into(), } } } @@ -47,8 +47,8 @@ impl From for ProgramPoint { pub enum ExpandedProgramPoint { /// An instruction in the function. Inst(Inst), - /// An EBB header. - Ebb(Ebb), + /// A block header.
+ Block(Block), } impl ExpandedProgramPoint { @@ -56,7 +56,7 @@ impl ExpandedProgramPoint { pub fn unwrap_inst(self) -> Inst { match self { Self::Inst(x) => x, - Self::Ebb(x) => panic!("expected inst: {}", x), + Self::Block(x) => panic!("expected inst: {}", x), } } } @@ -67,9 +67,9 @@ impl From for ExpandedProgramPoint { } } -impl From for ExpandedProgramPoint { - fn from(ebb: Ebb) -> Self { - Self::Ebb(ebb) +impl From for ExpandedProgramPoint { + fn from(block: Block) -> Self { + Self::Block(block) } } @@ -77,7 +77,7 @@ impl From for ExpandedProgramPoint { fn from(def: ValueDef) -> Self { match def { ValueDef::Result(inst, _) => inst.into(), - ValueDef::Param(ebb, _) => ebb.into(), + ValueDef::Param(block, _) => block.into(), } } } @@ -87,7 +87,7 @@ impl From for ExpandedProgramPoint { if pp.0 & 1 == 0 { Self::Inst(Inst::from_u32(pp.0 / 2)) } else { - Self::Ebb(Ebb::from_u32(pp.0 / 2)) + Self::Block(Block::from_u32(pp.0 / 2)) } } } @@ -96,7 +96,7 @@ impl fmt::Display for ExpandedProgramPoint { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Self::Inst(x) => write!(f, "{}", x), - Self::Ebb(x) => write!(f, "{}", x), + Self::Block(x) => write!(f, "{}", x), } } } @@ -129,7 +129,7 @@ pub trait ProgramOrder { /// /// Return `Less` if `a` appears in the program before `b`. /// - /// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments + /// This is declared as a generic such that it can be called with `Inst` and `Block` arguments /// directly. Depending on the implementation, there is a good chance performance will be /// improved for those cases where the type of either argument is known statically. fn cmp(&self, a: A, b: B) -> cmp::Ordering @@ -137,28 +137,28 @@ pub trait ProgramOrder { A: Into, B: Into; - /// Is the range from `inst` to `ebb` just the gap between consecutive EBBs? + /// Is the range from `inst` to `block` just the gap between consecutive blocks? /// - /// This returns true if `inst` is the terminator in the EBB immediately before `ebb`. - fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool; + /// This returns true if `inst` is the terminator in the block immediately before `block`. + fn is_block_gap(&self, inst: Inst, block: Block) -> bool; } #[cfg(test)] mod tests { use super::*; use crate::entity::EntityRef; - use crate::ir::{Ebb, Inst}; + use crate::ir::{Block, Inst}; use alloc::string::ToString; #[test] fn convert() { let i5 = Inst::new(5); - let b3 = Ebb::new(3); + let b3 = Block::new(3); let pp1: ProgramPoint = i5.into(); let pp2: ProgramPoint = b3.into(); assert_eq!(pp1.to_string(), "inst5"); - assert_eq!(pp2.to_string(), "ebb3"); + assert_eq!(pp2.to_string(), "block3"); } } diff --git a/cranelift/codegen/src/isa/constraints.rs b/cranelift/codegen/src/isa/constraints.rs index ce1d2a5fe1..c87c3bd9d4 100644 --- a/cranelift/codegen/src/isa/constraints.rs +++ b/cranelift/codegen/src/isa/constraints.rs @@ -95,7 +95,7 @@ pub struct RecipeConstraints { /// If the instruction takes a variable number of operands, the register constraints for those /// operands must be computed dynamically. /// - /// - For branches and jumps, EBB arguments must match the expectations of the destination EBB. + /// - For branches and jumps, block arguments must match the expectations of the destination block. /// - For calls and returns, the calling convention ABI specifies constraints. 
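The `ProgramPoint` conversions above pack both entity kinds into a single `u32`: instruction indices map to even payloads, block indices to odd ones (the `idx * 2 + 1` encode), so decoding only inspects the low bit (the `pp.0 & 1` check). A standalone sketch of the round trip, using plain `u32` indices instead of the entity newtypes:

```rust
#[derive(Debug, PartialEq)]
enum Expanded {
    Inst(u32),
    Block(u32),
}

fn encode_inst(idx: u32) -> u32 {
    debug_assert!(idx < u32::MAX / 2);
    idx * 2 // even payload
}

fn encode_block(idx: u32) -> u32 {
    debug_assert!(idx < u32::MAX / 2);
    idx * 2 + 1 // odd payload
}

fn expand(pp: u32) -> Expanded {
    // The low bit tells the two kinds apart; the rest is the entity index.
    if pp & 1 == 0 {
        Expanded::Inst(pp / 2)
    } else {
        Expanded::Block(pp / 2)
    }
}

fn main() {
    assert_eq!(expand(encode_inst(5)), Expanded::Inst(5));
    assert_eq!(expand(encode_block(3)), Expanded::Block(3));
}
```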
pub ins: &'static [OperandConstraint], @@ -173,7 +173,7 @@ pub struct BranchRange { impl BranchRange { /// Determine if this branch range can represent the range from `branch` to `dest`, where /// `branch` is the code offset of the branch instruction itself and `dest` is the code offset - /// of the destination EBB header. + /// of the destination block header. /// /// This method does not detect if the range is larger than 2 GB. pub fn contains(self, branch: CodeOffset, dest: CodeOffset) -> bool { diff --git a/cranelift/codegen/src/isa/riscv/mod.rs b/cranelift/codegen/src/isa/riscv/mod.rs index 25244fab81..8aa264f34f 100644 --- a/cranelift/codegen/src/isa/riscv/mod.rs +++ b/cranelift/codegen/src/isa/riscv/mod.rs @@ -158,9 +158,9 @@ mod tests { .finish(shared_flags); let mut func = Function::new(); - let ebb = func.dfg.make_ebb(); - let arg64 = func.dfg.append_ebb_param(ebb, types::I64); - let arg32 = func.dfg.append_ebb_param(ebb, types::I32); + let block = func.dfg.make_block(); + let arg64 = func.dfg.append_block_param(block, types::I64); + let arg32 = func.dfg.append_block_param(block, types::I32); // Try to encode iadd_imm.i64 v1, -10. let inst64 = InstructionData::BinaryImm { @@ -209,9 +209,9 @@ mod tests { .finish(shared_flags); let mut func = Function::new(); - let ebb = func.dfg.make_ebb(); - let arg64 = func.dfg.append_ebb_param(ebb, types::I64); - let arg32 = func.dfg.append_ebb_param(ebb, types::I32); + let block = func.dfg.make_block(); + let arg64 = func.dfg.append_block_param(block, types::I64); + let arg32 = func.dfg.append_block_param(block, types::I32); // Try to encode iadd_imm.i64 v1, -10. let inst64 = InstructionData::BinaryImm { @@ -268,8 +268,8 @@ mod tests { let isa = isa_builder.finish(shared_flags); let mut func = Function::new(); - let ebb = func.dfg.make_ebb(); - let arg32 = func.dfg.append_ebb_param(ebb, types::I32); + let block = func.dfg.make_block(); + let arg32 = func.dfg.append_block_param(block, types::I32); // Create an imul.i32 which is encodable in RV32M. let mul32 = InstructionData::Binary { diff --git a/cranelift/codegen/src/isa/x86/abi.rs b/cranelift/codegen/src/isa/x86/abi.rs index 3d160a2cf5..db67457a6c 100644 --- a/cranelift/codegen/src/isa/x86/abi.rs +++ b/cranelift/codegen/src/isa/x86/abi.rs @@ -419,8 +419,8 @@ fn callee_saved_gprs_used(isa: &dyn TargetIsa, func: &ir::Function) -> RegisterS // // TODO: Consider re-evaluating how regmove/regfill/regspill work and whether it's possible // to avoid this step. - for ebb in &func.layout { - for inst in func.layout.ebb_insts(ebb) { + for block in &func.layout { + for inst in func.layout.block_insts(block) { match func.dfg[inst] { ir::instructions::InstructionData::RegMove { dst, .. } | ir::instructions::InstructionData::RegFill { dst, .. 
} => { @@ -551,8 +551,8 @@ fn fastcall_prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> C } // Set up the cursor and insert the prologue - let entry_ebb = func.layout.entry_block().expect("missing entry block"); - let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_ebb); + let entry_block = func.layout.entry_block().expect("missing entry block"); + let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_block); let prologue_cfa_state = insert_common_prologue(&mut pos, local_stack_size, reg_type, &csrs, isa); @@ -612,8 +612,8 @@ fn system_v_prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> C } // Set up the cursor and insert the prologue - let entry_ebb = func.layout.entry_block().expect("missing entry block"); - let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_ebb); + let entry_block = func.layout.entry_block().expect("missing entry block"); + let mut pos = EncCursor::new(func, isa).at_first_insertion_point(entry_block); let prologue_cfa_state = insert_common_prologue(&mut pos, local_stack_size, reg_type, &csrs, isa); @@ -678,9 +678,9 @@ fn insert_common_prologue( None }; - // Append param to entry EBB - let ebb = pos.current_ebb().expect("missing ebb under cursor"); - let fp = pos.func.dfg.append_ebb_param(ebb, reg_type); + // Append param to entry block + let block = pos.current_block().expect("missing block under cursor"); + let fp = pos.func.dfg.append_block_param(block, reg_type); pos.func.locations[fp] = ir::ValueLoc::Reg(RU::rbp as RegUnit); let push_fp_inst = pos.ins().x86_push(fp); @@ -727,8 +727,8 @@ fn insert_common_prologue( } for reg in csrs.iter(GPR) { - // Append param to entry EBB - let csr_arg = pos.func.dfg.append_ebb_param(ebb, reg_type); + // Append param to entry block + let csr_arg = pos.func.dfg.append_block_param(block, reg_type); // Assign it a location pos.func.locations[csr_arg] = ir::ValueLoc::Reg(reg); @@ -831,11 +831,11 @@ fn insert_common_epilogues( isa: &dyn TargetIsa, cfa_state: Option, ) { - while let Some(ebb) = pos.next_ebb() { - pos.goto_last_inst(ebb); + while let Some(block) = pos.next_block() { + pos.goto_last_inst(block); if let Some(inst) = pos.current_inst() { if pos.func.dfg[inst].opcode().is_return() { - let is_last = pos.func.layout.last_ebb() == Some(ebb); + let is_last = pos.func.layout.last_block() == Some(block); insert_common_epilogue( inst, stack_size, diff --git a/cranelift/codegen/src/isa/x86/binemit.rs b/cranelift/codegen/src/isa/x86/binemit.rs index 5a373e6f96..44c497e547 100644 --- a/cranelift/codegen/src/isa/x86/binemit.rs +++ b/cranelift/codegen/src/isa/x86/binemit.rs @@ -4,7 +4,7 @@ use super::enc_tables::{needs_offset, needs_sib_byte}; use super::registers::RU; use crate::binemit::{bad_encoding, CodeSink, Reloc}; use crate::ir::condcodes::{CondCode, FloatCC, IntCC}; -use crate::ir::{Constant, Ebb, Function, Inst, InstructionData, JumpTable, Opcode, TrapCode}; +use crate::ir::{Block, Constant, Function, Inst, InstructionData, JumpTable, Opcode, TrapCode}; use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef, TargetIsa}; use crate::regalloc::RegDiversions; @@ -369,13 +369,13 @@ fn fcc2opc(cond: FloatCC) -> u16 { } /// Emit a single-byte branch displacement to `destination`. 
-fn disp1(destination: Ebb, func: &Function, sink: &mut CS) { +fn disp1(destination: Block, func: &Function, sink: &mut CS) { let delta = func.offsets[destination].wrapping_sub(sink.offset() + 1); sink.put1(delta as u8); } /// Emit a four-byte branch displacement to `destination`. -fn disp4(destination: Ebb, func: &Function, sink: &mut CS) { +fn disp4(destination: Block, func: &Function, sink: &mut CS) { let delta = func.offsets[destination].wrapping_sub(sink.offset() + 4); sink.put4(delta); } diff --git a/cranelift/codegen/src/isa/x86/enc_tables.rs b/cranelift/codegen/src/isa/x86/enc_tables.rs index 9a5481e228..947b315cd2 100644 --- a/cranelift/codegen/src/isa/x86/enc_tables.rs +++ b/cranelift/codegen/src/isa/x86/enc_tables.rs @@ -253,7 +253,7 @@ fn expand_sdivrem( _ => panic!("Need sdiv/srem: {}", func.dfg.display_inst(inst, None)), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); @@ -297,17 +297,17 @@ fn expand_sdivrem( return; } - // EBB handling the nominal case. - let nominal = pos.func.dfg.make_ebb(); + // block handling the nominal case. + let nominal = pos.func.dfg.make_block(); - // EBB handling the -1 divisor case. - let minus_one = pos.func.dfg.make_ebb(); + // block handling the -1 divisor case. + let minus_one = pos.func.dfg.make_block(); - // Final EBB with one argument representing the final result value. - let done = pos.func.dfg.make_ebb(); + // Final block with one argument representing the final result value. + let done = pos.func.dfg.make_block(); - // Move the `inst` result value onto the `done` EBB. - pos.func.dfg.attach_ebb_param(done, result); + // Move the `inst` result value onto the `done` block. + pos.func.dfg.attach_block_param(done, result); // Start by checking for a -1 divisor which needs to be handled specially. let is_m1 = pos.ins().ifcmp_imm(y, -1); @@ -316,14 +316,14 @@ fn expand_sdivrem( // Now it is safe to execute the `x86_sdivmodx` instruction which will still trap on division // by zero. - pos.insert_ebb(nominal); + pos.insert_block(nominal); let xhi = pos.ins().sshr_imm(x, i64::from(ty.lane_bits()) - 1); let (quot, rem) = pos.ins().x86_sdivmodx(x, xhi, y); let divres = if is_srem { rem } else { quot }; pos.ins().jump(done, &[divres]); // Now deal with the -1 divisor case. - pos.insert_ebb(minus_one); + pos.insert_block(minus_one); let m1_result = if is_srem { // x % -1 = 0. pos.ins().iconst(ty, 0) @@ -342,12 +342,12 @@ fn expand_sdivrem( // Finally insert a label for the completion. pos.next_inst(); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, nominal); - cfg.recompute_ebb(pos.func, minus_one); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, nominal); + cfg.recompute_block(pos.func, minus_one); + cfg.recompute_block(pos.func, done); } /// Expand the `udiv` and `urem` instructions using `x86_udivmodx`. @@ -421,7 +421,7 @@ fn expand_minmax( } => (args[0], args[1], ir::Opcode::X86Fmax, ir::Opcode::Band), _ => panic!("Expected fmin/fmax: {}", func.dfg.display_inst(inst, None)), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); // We need to handle the following conditions, depending on how x and y compare: // @@ -430,20 +430,20 @@ fn expand_minmax( // fmin(0.0, -0.0) -> -0.0 and fmax(0.0, -0.0) -> 0.0. // 3. 
UN: We need to produce a quiet NaN that is canonical if the inputs are canonical. - // EBB handling case 1) where operands are ordered but not equal. - let one_ebb = func.dfg.make_ebb(); + // block handling case 1) where operands are ordered but not equal. + let one_block = func.dfg.make_block(); - // EBB handling case 3) where one operand is NaN. - let uno_ebb = func.dfg.make_ebb(); + // block handling case 3) where one operand is NaN. + let uno_block = func.dfg.make_block(); - // EBB that handles the unordered or equal cases 2) and 3). - let ueq_ebb = func.dfg.make_ebb(); + // block that handles the unordered or equal cases 2) and 3). + let ueq_block = func.dfg.make_block(); - // EBB handling case 2) where operands are ordered and equal. - let eq_ebb = func.dfg.make_ebb(); + // block handling case 2) where operands are ordered and equal. + let eq_block = func.dfg.make_block(); - // Final EBB with one argument representing the final result value. - let done = func.dfg.make_ebb(); + // Final block with one argument representing the final result value. + let done = func.dfg.make_block(); // The basic blocks are laid out to minimize branching for the common cases: // @@ -451,21 +451,21 @@ fn expand_minmax( // 2) One branch taken. // 3) Two branches taken, one jump. - // Move the `inst` result value onto the `done` EBB. + // Move the `inst` result value onto the `done` block. let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); func.dfg.clear_results(inst); - func.dfg.attach_ebb_param(done, result); + func.dfg.attach_block_param(done, result); // Test for case 1) ordered and not equal. let mut pos = FuncCursor::new(func).at_inst(inst); pos.use_srcloc(inst); let cmp_ueq = pos.ins().fcmp(FloatCC::UnorderedOrEqual, x, y); - pos.ins().brnz(cmp_ueq, ueq_ebb, &[]); - pos.ins().jump(one_ebb, &[]); + pos.ins().brnz(cmp_ueq, ueq_block, &[]); + pos.ins().jump(one_block, &[]); // Handle the common ordered, not equal (LT|GT) case. - pos.insert_ebb(one_ebb); + pos.insert_block(one_block); let one_inst = pos.ins().Binary(x86_opc, ty, x, y).0; let one_result = pos.func.dfg.first_result(one_inst); pos.ins().jump(done, &[one_result]); @@ -473,21 +473,21 @@ fn expand_minmax( // Case 3) Unordered. // We know that at least one operand is a NaN that needs to be propagated. We simply use an // `fadd` instruction which has the same NaN propagation semantics. - pos.insert_ebb(uno_ebb); + pos.insert_block(uno_block); let uno_result = pos.ins().fadd(x, y); pos.ins().jump(done, &[uno_result]); // Case 2) or 3). - pos.insert_ebb(ueq_ebb); + pos.insert_block(ueq_block); // Test for case 3) (UN) one value is NaN. // TODO: When we get support for flag values, we can reuse the above comparison. let cmp_uno = pos.ins().fcmp(FloatCC::Unordered, x, y); - pos.ins().brnz(cmp_uno, uno_ebb, &[]); - pos.ins().jump(eq_ebb, &[]); + pos.ins().brnz(cmp_uno, uno_block, &[]); + pos.ins().jump(eq_block, &[]); // We are now in case 2) where x and y compare EQ. // We need a bitwise operation to get the sign right. - pos.insert_ebb(eq_ebb); + pos.insert_block(eq_block); let bw_inst = pos.ins().Binary(bitwise_opc, ty, x, y).0; let bw_result = pos.func.dfg.first_result(bw_inst); // This should become a fall-through for this second most common case. @@ -496,14 +496,14 @@ fn expand_minmax( // Finally insert a label for the completion. 
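In plain Rust, the three-way case split this `fmin`/`fmax` expansion encodes looks like the function below. One hedge: the visible hunk only shows the `fmax` arm pairing with `ir::Opcode::Band`; the bitwise OR used here for the `fmin` equal case is inferred from the sign-bit argument, not shown in the diff.

```rust
fn fmin_semantics(x: f64, y: f64) -> f64 {
    if x.is_nan() || y.is_nan() {
        // Case 3) UN: `fadd` propagates a quiet NaN (the `uno_block` arm).
        x + y
    } else if x == y {
        // Case 2) EQ: only the sign bit can differ, so OR the bit patterns
        // to make fmin(0.0, -0.0) return -0.0.
        f64::from_bits(x.to_bits() | y.to_bits())
    } else {
        // Case 1) LT|GT: an ordinary ordered minimum (the `one_block` arm).
        if x < y {
            x
        } else {
            y
        }
    }
}

fn main() {
    assert_eq!(fmin_semantics(1.0, 2.0), 1.0);
    assert!(fmin_semantics(0.0, -0.0).is_sign_negative());
    assert!(fmin_semantics(f64::NAN, 1.0).is_nan());
}
```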
pos.next_inst(); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, one_ebb); - cfg.recompute_ebb(pos.func, uno_ebb); - cfg.recompute_ebb(pos.func, ueq_ebb); - cfg.recompute_ebb(pos.func, eq_ebb); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, one_block); + cfg.recompute_block(pos.func, uno_block); + cfg.recompute_block(pos.func, ueq_block); + cfg.recompute_block(pos.func, eq_block); + cfg.recompute_block(pos.func, done); } /// x86 has no unsigned-to-float conversions. We handle the easy case of zero-extending i32 to @@ -540,33 +540,33 @@ fn expand_fcvt_from_uint( _ => unimplemented!(), } - let old_ebb = pos.func.layout.pp_ebb(inst); + let old_block = pos.func.layout.pp_block(inst); - // EBB handling the case where x >= 0. - let poszero_ebb = pos.func.dfg.make_ebb(); + // block handling the case where x >= 0. + let poszero_block = pos.func.dfg.make_block(); - // EBB handling the case where x < 0. - let neg_ebb = pos.func.dfg.make_ebb(); + // block handling the case where x < 0. + let neg_block = pos.func.dfg.make_block(); - // Final EBB with one argument representing the final result value. - let done = pos.func.dfg.make_ebb(); + // Final block with one argument representing the final result value. + let done = pos.func.dfg.make_block(); - // Move the `inst` result value onto the `done` EBB. + // Move the `inst` result value onto the `done` block. pos.func.dfg.clear_results(inst); - pos.func.dfg.attach_ebb_param(done, result); + pos.func.dfg.attach_block_param(done, result); // If x as a signed int is not negative, we can use the existing `fcvt_from_sint` instruction. let is_neg = pos.ins().icmp_imm(IntCC::SignedLessThan, x, 0); - pos.ins().brnz(is_neg, neg_ebb, &[]); - pos.ins().jump(poszero_ebb, &[]); + pos.ins().brnz(is_neg, neg_block, &[]); + pos.ins().jump(poszero_block, &[]); // Easy case: just use a signed conversion. - pos.insert_ebb(poszero_ebb); + pos.insert_block(poszero_block); let posres = pos.ins().fcvt_from_sint(ty, x); pos.ins().jump(done, &[posres]); // Now handle the negative case. - pos.insert_ebb(neg_ebb); + pos.insert_block(neg_block); // Divide x by two to get it in range for the signed conversion, keep the LSB, and scale it // back up on the FP side. @@ -581,12 +581,12 @@ fn expand_fcvt_from_uint( // Finally insert a label for the completion. pos.next_inst(); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, poszero_ebb); - cfg.recompute_ebb(pos.func, neg_ebb); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, poszero_block); + cfg.recompute_block(pos.func, neg_block); + cfg.recompute_block(pos.func, done); } fn expand_fcvt_to_sint( @@ -604,16 +604,16 @@ fn expand_fcvt_to_sint( } => arg, _ => panic!("Need fcvt_to_sint: {}", func.dfg.display_inst(inst, None)), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); let xty = func.dfg.value_type(x); let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); - // Final EBB after the bad value checks. - let done = func.dfg.make_ebb(); + // Final block after the bad value checks. + let done = func.dfg.make_block(); - // EBB for checking failure cases. - let maybe_trap_ebb = func.dfg.make_ebb(); + // block for checking failure cases. 
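The `expand_fcvt_from_uint` path above compresses to a few lines of integer arithmetic. A behavioral sketch for u64 -> f64; the halve/keep-LSB/double sequence is summarized from the comments, and `u64_to_f64` is an illustrative name, not a Cranelift API:

```rust
fn u64_to_f64(x: u64) -> f64 {
    if (x as i64) >= 0 {
        // Easy case: the sign bit is clear, so the signed conversion works.
        x as i64 as f64
    } else {
        // Halve x, OR the shifted-out LSB back in so rounding is unaffected,
        // convert the now-positive value as signed, and double the result.
        let half = (x >> 1) | (x & 1);
        let f = half as i64 as f64;
        f + f
    }
}

fn main() {
    assert_eq!(u64_to_f64(42), 42.0);
    assert_eq!(u64_to_f64(1 << 63), 9.223372036854776e18); // 2^63
    assert_eq!(u64_to_f64(u64::MAX), u64::MAX as f64);
}
```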
+ let maybe_trap_block = func.dfg.make_block(); // The `x86_cvtt2si` performs the desired conversion, but it doesn't trap on NaN or overflow. // It produces an INT_MIN result instead. @@ -626,7 +626,7 @@ fn expand_fcvt_to_sint( .ins() .icmp_imm(IntCC::NotEqual, result, 1 << (ty.lane_bits() - 1)); pos.ins().brnz(is_done, done, &[]); - pos.ins().jump(maybe_trap_ebb, &[]); + pos.ins().jump(maybe_trap_block, &[]); // We now have the following possibilities: // @@ -634,7 +634,7 @@ fn expand_fcvt_to_sint( // 2. The input was NaN -> trap bad_toint // 3. The input was out of range -> trap int_ovf // - pos.insert_ebb(maybe_trap_ebb); + pos.insert_block(maybe_trap_block); // Check for NaN. let is_nan = pos.ins().fcmp(FloatCC::Unordered, x, x); @@ -683,11 +683,11 @@ fn expand_fcvt_to_sint( pos.ins().trapnz(overflow, ir::TrapCode::IntegerOverflow); pos.ins().jump(done, &[]); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, maybe_trap_ebb); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, maybe_trap_block); + cfg.recompute_block(pos.func, done); } fn expand_fcvt_to_sint_sat( @@ -709,18 +709,18 @@ fn expand_fcvt_to_sint_sat( ), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); let xty = func.dfg.value_type(x); let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); - // Final EBB after the bad value checks. - let done_ebb = func.dfg.make_ebb(); - let intmin_ebb = func.dfg.make_ebb(); - let minsat_ebb = func.dfg.make_ebb(); - let maxsat_ebb = func.dfg.make_ebb(); + // Final block after the bad value checks. + let done_block = func.dfg.make_block(); + let intmin_block = func.dfg.make_block(); + let minsat_block = func.dfg.make_block(); + let maxsat_block = func.dfg.make_block(); func.dfg.clear_results(inst); - func.dfg.attach_ebb_param(done_ebb, result); + func.dfg.attach_block_param(done_block, result); let mut pos = FuncCursor::new(func).at_inst(inst); pos.use_srcloc(inst); @@ -732,25 +732,25 @@ fn expand_fcvt_to_sint_sat( let is_done = pos .ins() .icmp_imm(IntCC::NotEqual, cvtt2si, 1 << (ty.lane_bits() - 1)); - pos.ins().brnz(is_done, done_ebb, &[cvtt2si]); - pos.ins().jump(intmin_ebb, &[]); + pos.ins().brnz(is_done, done_block, &[cvtt2si]); + pos.ins().jump(intmin_block, &[]); // We now have the following possibilities: // // 1. INT_MIN was actually the correct conversion result. // 2. The input was NaN -> replace the result value with 0. // 3. The input was out of range -> saturate the result to the min/max value. - pos.insert_ebb(intmin_ebb); + pos.insert_block(intmin_block); // Check for NaN, which is truncated to 0. let zero = pos.ins().iconst(ty, 0); let is_nan = pos.ins().fcmp(FloatCC::Unordered, x, x); - pos.ins().brnz(is_nan, done_ebb, &[zero]); - pos.ins().jump(minsat_ebb, &[]); + pos.ins().brnz(is_nan, done_block, &[zero]); + pos.ins().jump(minsat_block, &[]); // Check for case 1: INT_MIN is the correct result. // Determine the smallest floating point number that would convert to INT_MIN. 
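The INT_MIN checks above and below exist because `x86_cvtt2si` (cvttsd2si) produces the "integer indefinite" value, INT_MIN, for both NaN and out-of-range inputs; that fact is from the x86 manuals, not this diff. A behavioral model under that assumption, using the `bad_toint`/`int_ovf` trap names from the comments:

```rust
fn cvtt2si_i32(x: f64) -> i32 {
    let t = x.trunc();
    if x.is_nan() || t < i32::MIN as f64 || t > i32::MAX as f64 {
        i32::MIN // "integer indefinite" sentinel
    } else {
        t as i32
    }
}

fn checked_fcvt_to_sint(x: f64) -> Result<i32, &'static str> {
    let res = cvtt2si_i32(x);
    if res != i32::MIN {
        return Ok(res); // fast path: nothing else to check
    }
    // Slow path, mirroring `maybe_trap_block`.
    if x.is_nan() {
        return Err("bad_toint");
    }
    if x.trunc() == i32::MIN as f64 {
        return Ok(i32::MIN); // INT_MIN really was the correct result
    }
    Err("int_ovf")
}

fn main() {
    assert_eq!(checked_fcvt_to_sint(-1.5), Ok(-1));
    assert_eq!(checked_fcvt_to_sint(-2147483648.0), Ok(i32::MIN));
    assert_eq!(checked_fcvt_to_sint(f64::NAN), Err("bad_toint"));
    assert_eq!(checked_fcvt_to_sint(1e30), Err("int_ovf"));
}
```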
- pos.insert_ebb(minsat_ebb); + pos.insert_block(minsat_block); let mut overflow_cc = FloatCC::LessThan; let output_bits = ty.lane_bits(); let flimit = match xty { @@ -786,11 +786,11 @@ fn expand_fcvt_to_sint_sat( _ => panic!("Don't know the min value for {}", ty), }; let min_value = pos.ins().iconst(ty, min_imm); - pos.ins().brnz(overflow, done_ebb, &[min_value]); - pos.ins().jump(maxsat_ebb, &[]); + pos.ins().brnz(overflow, done_block, &[min_value]); + pos.ins().jump(maxsat_block, &[]); // Finally, we could have a positive value that is too large. - pos.insert_ebb(maxsat_ebb); + pos.insert_block(maxsat_block); let fzero = match xty { ir::types::F32 => pos.ins().f32const(Ieee32::with_bits(0)), ir::types::F64 => pos.ins().f64const(Ieee64::with_bits(0)), @@ -805,20 +805,20 @@ fn expand_fcvt_to_sint_sat( let max_value = pos.ins().iconst(ty, max_imm); let overflow = pos.ins().fcmp(FloatCC::GreaterThanOrEqual, x, fzero); - pos.ins().brnz(overflow, done_ebb, &[max_value]); + pos.ins().brnz(overflow, done_block, &[max_value]); // Recycle the original instruction. - pos.func.dfg.replace(inst).jump(done_ebb, &[cvtt2si]); + pos.func.dfg.replace(inst).jump(done_block, &[cvtt2si]); // Finally insert a label for the completion. pos.next_inst(); - pos.insert_ebb(done_ebb); + pos.insert_block(done_block); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, intmin_ebb); - cfg.recompute_ebb(pos.func, minsat_ebb); - cfg.recompute_ebb(pos.func, maxsat_ebb); - cfg.recompute_ebb(pos.func, done_ebb); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, intmin_block); + cfg.recompute_block(pos.func, minsat_block); + cfg.recompute_block(pos.func, maxsat_block); + cfg.recompute_block(pos.func, done_block); } fn expand_fcvt_to_uint( @@ -837,26 +837,26 @@ fn expand_fcvt_to_uint( _ => panic!("Need fcvt_to_uint: {}", func.dfg.display_inst(inst, None)), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); let xty = func.dfg.value_type(x); let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); - // EBB handle numbers < 2^(N-1). - let below_uint_max_ebb = func.dfg.make_ebb(); + // block handling numbers < 2^(N-1). + let below_uint_max_block = func.dfg.make_block(); - // EBB handle numbers < 0. - let below_zero_ebb = func.dfg.make_ebb(); + // block handling numbers < 0. + let below_zero_block = func.dfg.make_block(); - // EBB handling numbers >= 2^(N-1). - let large = func.dfg.make_ebb(); + // block handling numbers >= 2^(N-1). + let large = func.dfg.make_block(); - // Final EBB after the bad value checks. - let done = func.dfg.make_ebb(); + // Final block after the bad value checks. + let done = func.dfg.make_block(); - // Move the `inst` result value onto the `done` EBB. + // Move the `inst` result value onto the `done` block. func.dfg.clear_results(inst); - func.dfg.attach_ebb_param(done, result); + func.dfg.attach_block_param(done, result); let mut pos = FuncCursor::new(func).at_inst(inst); pos.use_srcloc(inst); @@ -871,11 +871,11 @@ fn expand_fcvt_to_uint( let is_large = pos.ins().ffcmp(x, pow2nm1); pos.ins() .brff(FloatCC::GreaterThanOrEqual, is_large, large, &[]); - pos.ins().jump(below_uint_max_ebb, &[]); + pos.ins().jump(below_uint_max_block, &[]); // We need to generate a specific trap code when `x` is NaN, so reuse the flags from the // previous comparison.
- pos.insert_ebb(below_uint_max_ebb); + pos.insert_block(below_uint_max_block); pos.ins().trapff( FloatCC::Unordered, is_large, @@ -887,13 +887,13 @@ fn expand_fcvt_to_uint( let is_neg = pos.ins().ifcmp_imm(sres, 0); pos.ins() .brif(IntCC::SignedGreaterThanOrEqual, is_neg, done, &[sres]); - pos.ins().jump(below_zero_ebb, &[]); + pos.ins().jump(below_zero_block, &[]); - pos.insert_ebb(below_zero_ebb); + pos.insert_block(below_zero_block); pos.ins().trap(ir::TrapCode::IntegerOverflow); // Handle the case where x >= 2^(N-1) and not NaN. - pos.insert_ebb(large); + pos.insert_block(large); let adjx = pos.ins().fsub(x, pow2nm1); let lres = pos.ins().x86_cvtt2si(ty, adjx); let is_neg = pos.ins().ifcmp_imm(lres, 0); @@ -906,13 +906,13 @@ fn expand_fcvt_to_uint( // Finally insert a label for the completion. pos.next_inst(); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, below_uint_max_ebb); - cfg.recompute_ebb(pos.func, below_zero_ebb); - cfg.recompute_ebb(pos.func, large); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, below_uint_max_block); + cfg.recompute_block(pos.func, below_zero_block); + cfg.recompute_block(pos.func, large); + cfg.recompute_block(pos.func, done); } fn expand_fcvt_to_uint_sat( @@ -934,27 +934,27 @@ fn expand_fcvt_to_uint_sat( ), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); let xty = func.dfg.value_type(x); let result = func.dfg.first_result(inst); let ty = func.dfg.value_type(result); - // EBB handle numbers < 2^(N-1). - let below_pow2nm1_or_nan_ebb = func.dfg.make_ebb(); - let below_pow2nm1_ebb = func.dfg.make_ebb(); + // block handling numbers < 2^(N-1). + let below_pow2nm1_or_nan_block = func.dfg.make_block(); + let below_pow2nm1_block = func.dfg.make_block(); - // EBB handling numbers >= 2^(N-1). - let large = func.dfg.make_ebb(); + // block handling numbers >= 2^(N-1). + let large = func.dfg.make_block(); - // EBB handling numbers < 2^N. - let uint_large_ebb = func.dfg.make_ebb(); + // block handling numbers < 2^N. + let uint_large_block = func.dfg.make_block(); - // Final EBB after the bad value checks. - let done = func.dfg.make_ebb(); + // Final block after the bad value checks. + let done = func.dfg.make_block(); - // Move the `inst` result value onto the `done` EBB. + // Move the `inst` result value onto the `done` block. func.dfg.clear_results(inst); - func.dfg.attach_ebb_param(done, result); + func.dfg.attach_block_param(done, result); let mut pos = FuncCursor::new(func).at_inst(inst); pos.use_srcloc(inst); @@ -970,16 +970,16 @@ fn expand_fcvt_to_uint_sat( let is_large = pos.ins().ffcmp(x, pow2nm1); pos.ins() .brff(FloatCC::GreaterThanOrEqual, is_large, large, &[]); - pos.ins().jump(below_pow2nm1_or_nan_ebb, &[]); + pos.ins().jump(below_pow2nm1_or_nan_block, &[]); // We need to generate zero when `x` is NaN, so reuse the flags from the previous comparison. - pos.insert_ebb(below_pow2nm1_or_nan_ebb); + pos.insert_block(below_pow2nm1_or_nan_block); pos.ins().brff(FloatCC::Unordered, is_large, done, &[zero]); - pos.ins().jump(below_pow2nm1_ebb, &[]); + pos.ins().jump(below_pow2nm1_block, &[]); // Now we know that x < 2^(N-1) and not NaN. If the result of the cvtt2si is positive, we're // done; otherwise saturate to the minimum unsigned value, that is 0.
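For the unsigned conversions, the trick in `expand_fcvt_to_uint` above is to fold the large half of the unsigned range back into the signed one. A plain-Rust model of the f64 -> u32 case (N = 32); checks are reordered for clarity, the names are illustrative, and the generated IR tests x >= 2^(N-1) first via `ffcmp`/`brff`:

```rust
fn f64_to_u32(x: f64) -> Result<u32, &'static str> {
    const POW2NM1: f64 = 2147483648.0; // 2^(N-1)
    if x.is_nan() {
        return Err("bad_toint");
    }
    if x >= POW2NM1 {
        // Large case: shift into signed range, convert, then add 2^(N-1)
        // back on the integer side.
        let l = (x - POW2NM1).trunc();
        if l > i32::MAX as f64 {
            return Err("int_ovf");
        }
        return Ok((l as i32 as u32).wrapping_add(1 << 31));
    }
    let s = x.trunc();
    if s < 0.0 {
        return Err("int_ovf"); // truncated to a negative integer
    }
    Ok(s as u32)
}

fn main() {
    assert_eq!(f64_to_u32(3.7), Ok(3));
    assert_eq!(f64_to_u32(4294967295.0), Ok(u32::MAX));
    assert_eq!(f64_to_u32(-0.5), Ok(0)); // truncates to zero, not a trap
    assert_eq!(f64_to_u32(-1.0), Err("int_ovf"));
    assert_eq!(f64_to_u32(f64::NAN), Err("bad_toint"));
}
```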
- pos.insert_ebb(below_pow2nm1_ebb); + pos.insert_block(below_pow2nm1_block); let sres = pos.ins().x86_cvtt2si(ty, x); let is_neg = pos.ins().ifcmp_imm(sres, 0); pos.ins() @@ -987,7 +987,7 @@ fn expand_fcvt_to_uint_sat( pos.ins().jump(done, &[zero]); // Handle the case where x >= 2^(N-1) and not NaN. - pos.insert_ebb(large); + pos.insert_block(large); let adjx = pos.ins().fsub(x, pow2nm1); let lres = pos.ins().x86_cvtt2si(ty, adjx); let max_value = pos.ins().iconst( @@ -1001,9 +1001,9 @@ fn expand_fcvt_to_uint_sat( let is_neg = pos.ins().ifcmp_imm(lres, 0); pos.ins() .brif(IntCC::SignedLessThan, is_neg, done, &[max_value]); - pos.ins().jump(uint_large_ebb, &[]); + pos.ins().jump(uint_large_block, &[]); - pos.insert_ebb(uint_large_ebb); + pos.insert_block(uint_large_block); let lfinal = pos.ins().iadd_imm(lres, 1 << (ty.lane_bits() - 1)); // Recycle the original instruction as a jump. @@ -1011,14 +1011,14 @@ fn expand_fcvt_to_uint_sat( // Finally insert a label for the completion. pos.next_inst(); - pos.insert_ebb(done); + pos.insert_block(done); - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, below_pow2nm1_or_nan_ebb); - cfg.recompute_ebb(pos.func, below_pow2nm1_ebb); - cfg.recompute_ebb(pos.func, large); - cfg.recompute_ebb(pos.func, uint_large_ebb); - cfg.recompute_ebb(pos.func, done); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, below_pow2nm1_or_nan_block); + cfg.recompute_block(pos.func, below_pow2nm1_block); + cfg.recompute_block(pos.func, large); + cfg.recompute_block(pos.func, uint_large_block); + cfg.recompute_block(pos.func, done); } /// Convert shuffle instructions. diff --git a/cranelift/codegen/src/isa/x86/fde.rs b/cranelift/codegen/src/isa/x86/fde.rs index 6a56a09451..6687f532b0 100644 --- a/cranelift/codegen/src/isa/x86/fde.rs +++ b/cranelift/codegen/src/isa/x86/fde.rs @@ -182,14 +182,14 @@ pub fn emit_fde(func: &Function, isa: &dyn TargetIsa, sink: &mut dyn FrameUnwind assert!(func.frame_layout.is_some(), "expected func.frame_layout"); let frame_layout = func.frame_layout.as_ref().unwrap(); - let mut ebbs = func.layout.ebbs().collect::>(); - ebbs.sort_by_key(|ebb| func.offsets[*ebb]); // Ensure inst offsets always increase + let mut blocks = func.layout.blocks().collect::>(); + blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase let encinfo = isa.encoding_info(); let mut last_offset = 0; let mut changes = Vec::new(); - for ebb in ebbs { - for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) { + for block in blocks { + for (offset, inst, size) in func.inst_offsets(block, &encinfo) { let address_offset = (offset + size) as usize; assert!(last_offset <= address_offset); if let Some(cmds) = frame_layout.instructions.get(&inst) { @@ -343,9 +343,9 @@ mod tests { let mut func = Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv)); - let ebb0 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); let mut pos = FuncCursor::new(&mut func); - pos.insert_ebb(ebb0); + pos.insert_block(block0); pos.ins().return_(&[]); if let Some(stack_slot) = stack_slot { @@ -411,20 +411,20 @@ mod tests { sig.params.push(AbiParam::new(types::I32)); let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); - let ebb0 = func.dfg.make_ebb(); - let v0 = func.dfg.append_ebb_param(ebb0, types::I32); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let v0 = func.dfg.append_block_param(block0, 
types::I32); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); let mut pos = FuncCursor::new(&mut func); - pos.insert_ebb(ebb0); - pos.ins().brnz(v0, ebb2, &[]); - pos.ins().jump(ebb1, &[]); + pos.insert_block(block0); + pos.ins().brnz(v0, block2, &[]); + pos.ins().jump(block1, &[]); - pos.insert_ebb(ebb1); + pos.insert_block(block1); pos.ins().return_(&[]); - pos.insert_ebb(ebb2); + pos.insert_block(block2); pos.ins().trap(TrapCode::User(0)); func diff --git a/cranelift/codegen/src/isa/x86/unwind.rs b/cranelift/codegen/src/isa/x86/unwind.rs index ce05f3afab..693693ab37 100644 --- a/cranelift/codegen/src/isa/x86/unwind.rs +++ b/cranelift/codegen/src/isa/x86/unwind.rs @@ -127,7 +127,7 @@ impl UnwindInfo { } let prologue_end = func.prologue_end.unwrap(); - let entry_block = func.layout.ebbs().nth(0).expect("missing entry block"); + let entry_block = func.layout.blocks().nth(0).expect("missing entry block"); // Stores the stack size when SP is not adjusted via an immediate value let mut stack_size = None; @@ -519,9 +519,9 @@ mod tests { let mut func = Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv)); - let ebb0 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); let mut pos = FuncCursor::new(&mut func); - pos.insert_ebb(ebb0); + pos.insert_block(block0); pos.ins().return_(&[]); if let Some(stack_slot) = stack_slot { diff --git a/cranelift/codegen/src/legalizer/boundary.rs b/cranelift/codegen/src/legalizer/boundary.rs index 5063b6d910..7fb977a06a 100644 --- a/cranelift/codegen/src/legalizer/boundary.rs +++ b/cranelift/codegen/src/legalizer/boundary.rs @@ -22,7 +22,7 @@ use crate::cursor::{Cursor, FuncCursor}; use crate::flowgraph::ControlFlowGraph; use crate::ir::instructions::CallInfo; use crate::ir::{ - AbiParam, ArgumentLoc, ArgumentPurpose, DataFlowGraph, Ebb, Function, Inst, InstBuilder, + AbiParam, ArgumentLoc, ArgumentPurpose, Block, DataFlowGraph, Function, Inst, InstBuilder, MemFlags, SigRef, Signature, StackSlotData, StackSlotKind, Type, Value, ValueLoc, }; use crate::isa::TargetIsa; @@ -84,12 +84,12 @@ fn legalize_signature( /// Legalize the entry block parameters after `func`'s signature has been legalized. /// /// The legalized signature may contain more parameters than the original signature, and the -/// parameter types have been changed. This function goes through the parameters of the entry EBB +/// parameter types have been changed. This function goes through the parameters of the entry block /// and replaces them with parameters of the right type for the ABI. /// -/// The original entry EBB parameters are computed from the new ABI parameters by code inserted at +/// The original entry block parameters are computed from the new ABI parameters by code inserted at /// the top of the entry block. -fn legalize_entry_params(func: &mut Function, entry: Ebb) { +fn legalize_entry_params(func: &mut Function, entry: Block) { let mut has_sret = false; let mut has_link = false; let mut has_vmctx = false; @@ -104,19 +104,19 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) { // Keep track of the argument types in the ABI-legalized signature. let mut abi_arg = 0; - // Process the EBB parameters one at a time, possibly replacing one argument with multiple new - // ones. We do this by detaching the entry EBB parameters first. - let ebb_params = pos.func.dfg.detach_ebb_params(entry); + // Process the block parameters one at a time, possibly replacing one argument with multiple new + // ones. 
We do this by detaching the entry block parameters first. + let block_params = pos.func.dfg.detach_block_params(entry); let mut old_arg = 0; - while let Some(arg) = ebb_params.get(old_arg, &pos.func.dfg.value_lists) { + while let Some(arg) = block_params.get(old_arg, &pos.func.dfg.value_lists) { old_arg += 1; let abi_type = pos.func.signature.params[abi_arg]; let arg_type = pos.func.dfg.value_type(arg); if arg_type == abi_type.value_type { // No value translation is necessary, this argument matches the ABI type. - // Just use the original EBB argument value. This is the most common case. - pos.func.dfg.attach_ebb_param(entry, arg); + // Just use the original block argument value. This is the most common case. + pos.func.dfg.attach_block_param(entry, arg); match abi_type.purpose { ArgumentPurpose::Normal => {} ArgumentPurpose::FramePointer => {} @@ -151,13 +151,13 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) { ); if ty == abi_type.value_type { abi_arg += 1; - Ok(func.dfg.append_ebb_param(entry, ty)) + Ok(func.dfg.append_block_param(entry, ty)) } else { Err(abi_type) } }; let converted = convert_from_abi(&mut pos, arg_type, Some(arg), &mut get_arg); - // The old `arg` is no longer an attached EBB argument, but there are probably still + // The old `arg` is no longer an attached block argument, but there are probably still // uses of the value. debug_assert_eq!(pos.func.dfg.resolve_aliases(arg), converted); } @@ -201,7 +201,7 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) { // Just create entry block values to match here. We will use them in `handle_return_abi()` // below. - pos.func.dfg.append_ebb_param(entry, arg.value_type); + pos.func.dfg.append_block_param(entry, arg.value_type); } } @@ -851,7 +851,7 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph let val = pos .func .dfg - .ebb_params(pos.func.layout.entry_block().unwrap())[idx]; + .block_params(pos.func.layout.entry_block().unwrap())[idx]; debug_assert_eq!(pos.func.dfg.value_type(val), arg.value_type); vlist.push(val, &mut pos.func.dfg.value_lists); @@ -958,8 +958,13 @@ fn round_up_to_multiple_of_pow2(n: u32, to: u32) -> u32 { /// /// Values that are passed into the function on the stack must be assigned to an `IncomingArg` /// stack slot already during legalization. -fn spill_entry_params(func: &mut Function, entry: Ebb) { - for (abi, &arg) in func.signature.params.iter().zip(func.dfg.ebb_params(entry)) { +fn spill_entry_params(func: &mut Function, entry: Block) { + for (abi, &arg) in func + .signature + .params + .iter() + .zip(func.dfg.block_params(entry)) + { if let ArgumentLoc::Stack(offset) = abi.location { let ss = func.stack_slots.make_incoming_arg(abi.value_type, offset); func.locations[arg] = ValueLoc::Stack(ss); diff --git a/cranelift/codegen/src/legalizer/heap.rs b/cranelift/codegen/src/legalizer/heap.rs index a6d9d9637d..cc4308c268 100644 --- a/cranelift/codegen/src/legalizer/heap.rs +++ b/cranelift/codegen/src/legalizer/heap.rs @@ -120,12 +120,12 @@ fn static_addr( pos.ins().trap(ir::TrapCode::HeapOutOfBounds); pos.func.dfg.replace(inst).iconst(addr_ty, 0); - // Split Ebb, as the trap is a terminator instruction. - let curr_ebb = pos.current_ebb().expect("Cursor is not in an ebb"); - let new_ebb = pos.func.dfg.make_ebb(); - pos.insert_ebb(new_ebb); - cfg.recompute_ebb(pos.func, curr_ebb); - cfg.recompute_ebb(pos.func, new_ebb); + // Split Block, as the trap is a terminator instruction. 
+ let curr_block = pos.current_block().expect("Cursor is not in a block"); + let new_block = pos.func.dfg.make_block(); + pos.insert_block(new_block); + cfg.recompute_block(pos.func, curr_block); + cfg.recompute_block(pos.func, new_block); return; } diff --git a/cranelift/codegen/src/legalizer/mod.rs index 3257949955..781767336a 100644 --- a/cranelift/codegen/src/legalizer/mod.rs +++ b/cranelift/codegen/src/legalizer/mod.rs @@ -87,7 +87,7 @@ fn legalize_inst( return LegalizeInstResult::SplitLegalizePending; } } - ir::ValueDef::Param(_ebb, _num) => {} + ir::ValueDef::Param(_block, _num) => {} } let res = pos.func.dfg.inst_results(inst).to_vec(); @@ -148,10 +148,10 @@ pub fn legalize_function(func: &mut ir::Function, cfg: &mut ControlFlowGraph, is let mut pos = FuncCursor::new(func); let func_begin = pos.position(); - // Split ebb params before trying to legalize instructions, so that the newly introduced + // Split block params before trying to legalize instructions, so that the newly introduced // isplit instructions get legalized. - while let Some(ebb) = pos.next_ebb() { - split::split_ebb_params(pos.func, cfg, ebb); + while let Some(block) = pos.next_block() { + split::split_block_params(pos.func, cfg, block); } pos.set_position(func_begin); @@ -159,9 +159,9 @@ pub fn legalize_function(func: &mut ir::Function, cfg: &mut ControlFlowGraph, is // This must be a set to prevent trying to legalize `isplit` and `vsplit` twice in certain cases. let mut pending_splits = BTreeSet::new(); - // Process EBBs in layout order. Some legalization actions may split the current EBB or append - // new ones to the end. We need to make sure we visit those new EBBs too. - while let Some(_ebb) = pos.next_ebb() { + // Process blocks in layout order. Some legalization actions may split the current block or append + // new ones to the end. We need to make sure we visit those new blocks too. + while let Some(_block) = pos.next_block() { // Keep track of the cursor position before the instruction being processed, so we can // double back when replacing instructions. let mut prev_pos = pos.position(); @@ -225,48 +225,48 @@ fn expand_cond_trap( _ => panic!("Expected cond trap: {}", func.dfg.display_inst(inst, None)), }; - // Split the EBB after `inst`: + // Split the block after `inst`: // // trapnz arg // .. // // Becomes: // - // brz arg, new_ebb_resume - // jump new_ebb_trap + // brz arg, new_block_resume + // jump new_block_trap // - // new_ebb_trap: + // new_block_trap: // trap // - // new_ebb_resume: + // new_block_resume: // .. - let old_ebb = func.layout.pp_ebb(inst); - let new_ebb_trap = func.dfg.make_ebb(); - let new_ebb_resume = func.dfg.make_ebb(); + let old_block = func.layout.pp_block(inst); + let new_block_trap = func.dfg.make_block(); + let new_block_resume = func.dfg.make_block(); // Replace trap instruction by the inverted condition. if trapz { - func.dfg.replace(inst).brnz(arg, new_ebb_resume, &[]); + func.dfg.replace(inst).brnz(arg, new_block_resume, &[]); } else { - func.dfg.replace(inst).brz(arg, new_ebb_resume, &[]); + func.dfg.replace(inst).brz(arg, new_block_resume, &[]); } // Add jump instruction after the inverted branch. let mut pos = FuncCursor::new(func).after_inst(inst); pos.use_srcloc(inst); - pos.ins().jump(new_ebb_trap, &[]); + pos.ins().jump(new_block_trap, &[]); // Insert the new label and the unconditional trap terminator.
- pos.insert_ebb(new_ebb_trap); + pos.insert_block(new_block_trap); pos.ins().trap(code); // Insert the new label and resume the execution when the trap fails. - pos.insert_ebb(new_ebb_resume); + pos.insert_block(new_block_resume); // Finally update the CFG. - cfg.recompute_ebb(pos.func, old_ebb); - cfg.recompute_ebb(pos.func, new_ebb_resume); - cfg.recompute_ebb(pos.func, new_ebb_trap); + cfg.recompute_block(pos.func, old_block); + cfg.recompute_block(pos.func, new_block_resume); + cfg.recompute_block(pos.func, new_block_trap); } /// Jump tables. @@ -292,7 +292,7 @@ fn expand_br_table_jt( ) { use crate::ir::condcodes::IntCC; - let (arg, default_ebb, table) = match func.dfg[inst] { + let (arg, default_block, table) = match func.dfg[inst] { ir::InstructionData::BranchTable { opcode: ir::Opcode::BrTable, arg, @@ -304,22 +304,22 @@ fn expand_br_table_jt( // Rewrite: // - // br_table $idx, default_ebb, $jt + // br_table $idx, default_block, $jt // // To: // // $oob = ifcmp_imm $idx, len($jt) - // brif uge $oob, default_ebb - // jump fallthrough_ebb + // brif uge $oob, default_block + // jump fallthrough_block // - // fallthrough_ebb: + // fallthrough_block: // $base = jump_table_base.i64 $jt // $rel_addr = jump_table_entry.i64 $idx, $base, 4, $jt // $addr = iadd $base, $rel_addr // indirect_jump_table_br $addr, $jt - let ebb = func.layout.pp_ebb(inst); - let jump_table_ebb = func.dfg.make_ebb(); + let block = func.layout.pp_block(inst); + let jump_table_block = func.dfg.make_block(); let mut pos = FuncCursor::new(func).at_inst(inst); pos.use_srcloc(inst); @@ -330,9 +330,9 @@ fn expand_br_table_jt( .ins() .icmp_imm(IntCC::UnsignedGreaterThanOrEqual, arg, table_size); - pos.ins().brnz(oob, default_ebb, &[]); - pos.ins().jump(jump_table_ebb, &[]); - pos.insert_ebb(jump_table_ebb); + pos.ins().brnz(oob, default_block, &[]); + pos.ins().jump(jump_table_block, &[]); + pos.insert_block(jump_table_block); let addr_ty = isa.pointer_type(); @@ -351,8 +351,8 @@ fn expand_br_table_jt( pos.ins().indirect_jump_table_br(addr, table); pos.remove_inst(); - cfg.recompute_ebb(pos.func, ebb); - cfg.recompute_ebb(pos.func, jump_table_ebb); + cfg.recompute_block(pos.func, block); + cfg.recompute_block(pos.func, jump_table_block); } /// Expand br_table to series of conditionals. @@ -364,7 +364,7 @@ fn expand_br_table_conds( ) { use crate::ir::condcodes::IntCC; - let (arg, default_ebb, table) = match func.dfg[inst] { + let (arg, default_block, table) = match func.dfg[inst] { ir::InstructionData::BranchTable { opcode: ir::Opcode::BrTable, arg, @@ -374,15 +374,15 @@ fn expand_br_table_conds( _ => panic!("Expected br_table: {}", func.dfg.display_inst(inst, None)), }; - let ebb = func.layout.pp_ebb(inst); + let block = func.layout.pp_block(inst); // This is a poor man's jump table using just a sequence of conditional branches. let table_size = func.jump_tables[table].len(); - let mut cond_failed_ebb = vec![]; + let mut cond_failed_block = vec![]; if table_size >= 1 { - cond_failed_ebb = alloc::vec::Vec::with_capacity(table_size - 1); + cond_failed_block = alloc::vec::Vec::with_capacity(table_size - 1); for _ in 0..table_size - 1 { - cond_failed_ebb.push(func.dfg.make_ebb()); + cond_failed_block.push(func.dfg.make_block()); } } @@ -397,19 +397,19 @@ fn expand_br_table_conds( pos.ins().brnz(t, dest, &[]); // Jump to the next case. 
if i < table_size - 1 { - let ebb = cond_failed_ebb[i]; - pos.ins().jump(ebb, &[]); - pos.insert_ebb(ebb); + let block = cond_failed_block[i]; + pos.ins().jump(block, &[]); + pos.insert_block(block); } } // `br_table` jumps to the default destination if nothing matches - pos.ins().jump(default_ebb, &[]); + pos.ins().jump(default_block, &[]); pos.remove_inst(); - cfg.recompute_ebb(pos.func, ebb); - for failed_ebb in cond_failed_ebb.into_iter() { - cfg.recompute_ebb(pos.func, failed_ebb); + cfg.recompute_block(pos.func, block); + for failed_block in cond_failed_block.into_iter() { + cfg.recompute_block(pos.func, failed_block); } } @@ -433,23 +433,23 @@ fn expand_select( // Replace `result = select ctrl, tval, fval` with: // - // brnz ctrl, new_ebb(tval) - // jump new_ebb(fval) - // new_ebb(result): - let old_ebb = func.layout.pp_ebb(inst); + // brnz ctrl, new_block(tval) + // jump new_block(fval) + // new_block(result): + let old_block = func.layout.pp_block(inst); let result = func.dfg.first_result(inst); func.dfg.clear_results(inst); - let new_ebb = func.dfg.make_ebb(); - func.dfg.attach_ebb_param(new_ebb, result); + let new_block = func.dfg.make_block(); + func.dfg.attach_block_param(new_block, result); - func.dfg.replace(inst).brnz(ctrl, new_ebb, &[tval]); + func.dfg.replace(inst).brnz(ctrl, new_block, &[tval]); let mut pos = FuncCursor::new(func).after_inst(inst); pos.use_srcloc(inst); - pos.ins().jump(new_ebb, &[fval]); - pos.insert_ebb(new_ebb); + pos.ins().jump(new_block, &[fval]); + pos.insert_block(new_block); - cfg.recompute_ebb(pos.func, new_ebb); - cfg.recompute_ebb(pos.func, old_ebb); + cfg.recompute_block(pos.func, new_block); + cfg.recompute_block(pos.func, old_block); } fn expand_br_icmp( @@ -458,7 +458,7 @@ fn expand_br_icmp( cfg: &mut ControlFlowGraph, _isa: &dyn TargetIsa, ) { - let (cond, a, b, destination, ebb_args) = match func.dfg[inst] { + let (cond, a, b, destination, block_args) = match func.dfg[inst] { ir::InstructionData::BranchIcmp { cond, destination, @@ -474,16 +474,16 @@ fn expand_br_icmp( _ => panic!("Expected br_icmp {}", func.dfg.display_inst(inst, None)), }; - let old_ebb = func.layout.pp_ebb(inst); + let old_block = func.layout.pp_block(inst); func.dfg.clear_results(inst); let icmp_res = func.dfg.replace(inst).icmp(cond, a, b); let mut pos = FuncCursor::new(func).after_inst(inst); pos.use_srcloc(inst); - pos.ins().brnz(icmp_res, destination, &ebb_args); + pos.ins().brnz(icmp_res, destination, &block_args); - cfg.recompute_ebb(pos.func, destination); - cfg.recompute_ebb(pos.func, old_ebb); + cfg.recompute_block(pos.func, destination); + cfg.recompute_block(pos.func, old_block); } /// Expand illegal `f32const` and `f64const` instructions. diff --git a/cranelift/codegen/src/legalizer/split.rs b/cranelift/codegen/src/legalizer/split.rs index 62f89b3975..ea4a032163 100644 --- a/cranelift/codegen/src/legalizer/split.rs +++ b/cranelift/codegen/src/legalizer/split.rs @@ -54,19 +54,19 @@ //! This means that the `iconcat` instructions defining `v1` and `v4` end up with no uses, so they //! can be trivially deleted by a dead code elimination pass. //! -//! # EBB arguments +//! # block arguments //! //! If all instructions that produce an `i64` value are legalized as above, we will eventually end -//! up with no `i64` values anywhere, except for EBB arguments. We can work around this by -//! iteratively splitting EBB arguments too. That should leave us with no illegal value types +//! up with no `i64` values anywhere, except for block arguments. 
We can work around this by +//! iteratively splitting block arguments too. That should leave us with no illegal value types //! anywhere. //! -//! It is possible to have circular dependencies of EBB arguments that are never used by any real +//! It is possible to have circular dependencies of block arguments that are never used by any real //! instructions. These loops will remain in the program. use crate::cursor::{Cursor, CursorPosition, FuncCursor}; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; -use crate::ir::{self, Ebb, Inst, InstBuilder, InstructionData, Opcode, Type, Value, ValueDef}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; +use crate::ir::{self, Block, Inst, InstBuilder, InstructionData, Opcode, Type, Value, ValueDef}; use alloc::vec::Vec; use core::iter; use smallvec::SmallVec; @@ -95,7 +95,7 @@ pub fn vsplit( split_any(func, cfg, pos, srcloc, value, Opcode::Vconcat) } -/// After splitting an EBB argument, we need to go back and fix up all of the predecessor +/// After splitting a block argument, we need to go back and fix up all of the predecessor /// instructions. This is potentially a recursive operation, but we don't implement it recursively /// since that could use up too much stack. /// @@ -104,11 +104,11 @@ struct Repair { concat: Opcode, // The argument type after splitting. split_type: Type, - // The destination EBB whose arguments have been split. - ebb: Ebb, - // Number of the original EBB argument which has been replaced by the low part. + // The destination block whose arguments have been split. + block: Block, + // Number of the original block argument which has been replaced by the low part. num: usize, - // Number of the new EBB argument which represents the high part after the split. + // Number of the new block argument which represents the high part after the split. hi_num: usize, } @@ -130,9 +130,9 @@ fn split_any( result } -pub fn split_ebb_params(func: &mut ir::Function, cfg: &ControlFlowGraph, ebb: Ebb) { - let pos = &mut FuncCursor::new(func).at_top(ebb); - let ebb_params = pos.func.dfg.ebb_params(ebb); +pub fn split_block_params(func: &mut ir::Function, cfg: &ControlFlowGraph, block: Block) { + let pos = &mut FuncCursor::new(func).at_top(block); + let block_params = pos.func.dfg.block_params(block); // Add further splittable types here. fn type_requires_splitting(ty: Type) -> bool { } // A shortcut. If none of the param types require splitting, exit now. This helps because - // the loop below necessarily has to copy the ebb params into a new vector, so it's better to + // the loop below necessarily has to copy the block params into a new vector, so it's better to // avoid doing so when possible.
- if !ebb_params + if !block_params .iter() - .any(|ebb_param| type_requires_splitting(pos.func.dfg.value_type(*ebb_param))) + .any(|block_param| type_requires_splitting(pos.func.dfg.value_type(*block_param))) { return; } let mut repairs = Vec::new(); - for (num, ebb_param) in ebb_params.to_vec().into_iter().enumerate() { - if !type_requires_splitting(pos.func.dfg.value_type(ebb_param)) { + for (num, block_param) in block_params.to_vec().into_iter().enumerate() { + if !type_requires_splitting(pos.func.dfg.value_type(block_param)) { continue; } - split_ebb_param(pos, ebb, num, ebb_param, Opcode::Iconcat, &mut repairs); + split_block_param(pos, block, num, block_param, Opcode::Iconcat, &mut repairs); } perform_repairs(pos, cfg, repairs); } fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Vec<Repair>) { - // We have split the value requested, and now we may need to fix some EBB predecessors. + // We have split the value requested, and now we may need to fix some block predecessors. while let Some(repair) = repairs.pop() { - for BasicBlock { inst, .. } in cfg.pred_iter(repair.ebb) { + for BlockPredecessor { inst, .. } in cfg.pred_iter(repair.block) { let branch_opc = pos.func.dfg[inst].opcode(); debug_assert!( branch_opc.is_branch(), @@ -176,7 +176,7 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve .take_value_list() .expect("Branches must have value lists."); let num_args = args.len(&pos.func.dfg.value_lists); - // Get the old value passed to the EBB argument we're repairing. + // Get the old value passed to the block argument we're repairing. let old_arg = args .get(num_fixed_args + repair.num, &pos.func.dfg.value_lists) .expect("Too few branch arguments"); @@ -190,13 +190,13 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve // Split the old argument, possibly causing more repairs to be scheduled. pos.goto_inst(inst); - let inst_ebb = pos.func.layout.inst_ebb(inst).expect("inst in ebb"); + let inst_block = pos.func.layout.inst_block(inst).expect("inst in block"); // Insert split values prior to the terminal branch group. let canonical = pos .func .layout - .canonical_branch_inst(&pos.func.dfg, inst_ebb); + .canonical_branch_inst(&pos.func.dfg, inst_block); if let Some(first_branch) = canonical { pos.goto_inst(first_branch); } @@ -209,7 +209,7 @@ fn perform_repairs(pos: &mut FuncCursor, cfg: &ControlFlowGraph, mut repairs: Ve .unwrap() = lo; // The `hi` part goes at the end. Since multiple repairs may have been scheduled to the - // same EBB, there could be multiple arguments missing. + // same block, there could be multiple arguments missing. if num_args > num_fixed_args + repair.hi_num { *args .get_mut( @@ -259,11 +259,11 @@ fn split_value( } } } - ValueDef::Param(ebb, num) => { - // This is an EBB parameter. + ValueDef::Param(block, num) => { + // This is a block parameter. // We can split the parameter value unless this is the entry block. - if pos.func.layout.entry_block() != Some(ebb) { - reuse = Some(split_ebb_param(pos, ebb, num, value, concat, repairs)); + if pos.func.layout.entry_block() != Some(block) { + reuse = Some(split_block_param(pos, block, num, value, concat, repairs)); } } } @@ -273,7 +273,7 @@ fn split_value( pair } else { // No, we'll just have to insert the requested split instruction at `pos`. Note that `pos` - // has not been moved by the EBB argument code above when `reuse` is `None`. + // has not been moved by the block argument code above when `reuse` is `None`.
match concat { Opcode::Iconcat => pos.ins().isplit(value), Opcode::Vconcat => pos.ins().vsplit(value), } } } -fn split_ebb_param( +fn split_block_param( pos: &mut FuncCursor, - ebb: Ebb, + block: Block, param_num: usize, value: Value, concat: Opcode, @@ -300,14 +300,14 @@ fn split_ebb_param( }; // Since the `repairs` stack potentially contains other parameter numbers for - // `ebb`, avoid shifting and renumbering EBB parameters. It could invalidate other + // `block`, avoid shifting and renumbering block parameters. It could invalidate other // `repairs` entries. // // Replace the original `value` with the low part, and append the high part at the // end of the argument list. - let lo = pos.func.dfg.replace_ebb_param(value, split_type); - let hi_num = pos.func.dfg.num_ebb_params(ebb); - let hi = pos.func.dfg.append_ebb_param(ebb, split_type); + let lo = pos.func.dfg.replace_block_param(value, split_type); + let hi_num = pos.func.dfg.num_block_params(block); + let hi = pos.func.dfg.append_block_param(block, split_type); // Now the original value is dangling. Insert a concatenation instruction that can // compute it from the two new parameters. This also serves as a record of what we @@ -315,14 +315,14 @@ fn split_ebb_param( // // Note that it is safe to move `pos` here since `reuse` was set above, so we don't // need to insert a split instruction before returning. - pos.goto_first_inst(ebb); + pos.goto_first_inst(block); pos.ins() .with_result(value) .Binary(concat, split_type, lo, hi); - // Finally, splitting the EBB parameter is not enough. We also have to repair all + // Finally, splitting the block parameter is not enough. We also have to repair all // of the predecessor instructions that branch here. - add_repair(concat, split_type, ebb, param_num, hi_num, repairs); + add_repair(concat, split_type, block, param_num, hi_num, repairs); (lo, hi) } @@ -331,7 +331,7 @@ fn split_ebb_param( fn add_repair( concat: Opcode, split_type: Type, - ebb: Ebb, + block: Block, num: usize, hi_num: usize, repairs: &mut Vec<Repair>, ) { repairs.push(Repair { concat, split_type, - ebb, + block, num, hi_num, }); } diff --git a/cranelift/codegen/src/licm.rs b/cranelift/codegen/src/licm.rs index 4fa5d314f6..75000b5297 100644 --- a/cranelift/codegen/src/licm.rs +++ b/cranelift/codegen/src/licm.rs @@ -3,10 +3,10 @@ use crate::cursor::{Cursor, EncCursor, FuncCursor}; use crate::dominator_tree::DominatorTree; use crate::entity::{EntityList, ListPool}; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; use crate::fx::FxHashSet; use crate::ir::{ - DataFlowGraph, Ebb, Function, Inst, InstBuilder, InstructionData, Layout, Opcode, Type, Value, + Block, DataFlowGraph, Function, Inst, InstBuilder, InstructionData, Layout, Opcode, Type, Value, }; use crate::isa::TargetIsa; use crate::loop_analysis::{Loop, LoopAnalysis}; @@ -65,23 +65,23 @@ pub fn do_licm( // A jump instruction to the header is placed at the end of the pre-header.
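
As a concrete picture of the layout this produces, the following sketch (an illustration under the `make_block`/`insert_block` API introduced by this patch, with hypothetical block and value names, and shape-only rather than verifier-clean) builds a one-parameter loop header together with its pre-header:

    use cranelift_codegen::cursor::{Cursor, FuncCursor};
    use cranelift_codegen::ir::{types, Function, InstBuilder};

    // pre_header(v0) ends in `jump header(v0)`; header(v1) carries the back edge.
    fn pre_header_shape() -> Function {
        let mut func = Function::new();
        let pre_header = func.dfg.make_block();
        let header = func.dfg.make_block();
        // The pre-header takes one parameter per header parameter...
        let v0 = func.dfg.append_block_param(pre_header, types::I32);
        let v1 = func.dfg.append_block_param(header, types::I32);

        let mut cur = FuncCursor::new(&mut func);
        cur.insert_block(pre_header);
        // ...and its only terminator is a jump forwarding them to the header.
        cur.ins().jump(header, &[v0]);

        cur.insert_block(header);
        cur.ins().brnz(v1, header, &[v1]); // back edge: the header dominates it
        cur.ins().return_(&[]);
        func
    }
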
fn create_pre_header( isa: &dyn TargetIsa, - header: Ebb, + header: Block, func: &mut Function, cfg: &mut ControlFlowGraph, domtree: &DominatorTree, -) -> Ebb { +) -> Block { let pool = &mut ListPool::<Value>::new(); - let header_args_values = func.dfg.ebb_params(header).to_vec(); + let header_args_values = func.dfg.block_params(header).to_vec(); let header_args_types: Vec<Type> = header_args_values .into_iter() .map(|val| func.dfg.value_type(val)) .collect(); - let pre_header = func.dfg.make_ebb(); + let pre_header = func.dfg.make_block(); let mut pre_header_args_value: EntityList<Value> = EntityList::new(); for typ in header_args_types { - pre_header_args_value.push(func.dfg.append_ebb_param(pre_header, typ), pool); + pre_header_args_value.push(func.dfg.append_block_param(pre_header, typ), pool); } - for BasicBlock { + for BlockPredecessor { inst: last_inst, .. } in cfg.pred_iter(header) { @@ -93,7 +93,7 @@ fn create_pre_header( { let mut pos = EncCursor::new(func, isa).at_top(header); // Inserts the pre-header at the right place in the layout. - pos.insert_ebb(pre_header); + pos.insert_block(pre_header); pos.next_inst(); pos.ins().jump(header, pre_header_args_value.as_slice(pool)); } @@ -104,16 +104,16 @@ fn create_pre_header( // // A loop header has a pre-header if there is only one predecessor that the header doesn't // dominate. -// Returns the pre-header Ebb and the instruction jumping to the header. +// Returns the pre-header Block and the instruction jumping to the header. fn has_pre_header( layout: &Layout, cfg: &ControlFlowGraph, domtree: &DominatorTree, - header: Ebb, -) -> Option<(Ebb, Inst)> { + header: Block, +) -> Option<(Block, Inst)> { let mut result = None; - for BasicBlock { - ebb: pred_ebb, + for BlockPredecessor { + block: pred_block, inst: branch_inst, } in cfg.pred_iter(header) { @@ -123,13 +123,13 @@ fn has_pre_header( // We have already found one, there are more than one return None; } - if branch_inst != layout.last_inst(pred_ebb).unwrap() - || cfg.succ_iter(pred_ebb).nth(1).is_some() + if branch_inst != layout.last_inst(pred_block).unwrap() + || cfg.succ_iter(pred_block).nth(1).is_some() { // It's along a critical edge, so don't use it. return None; } - result = Some((pred_ebb, branch_inst)); + result = Some((pred_block, branch_inst)); } } result @@ -176,7 +176,7 @@ fn is_loop_invariant(inst: Inst, dfg: &DataFlowGraph, loop_values: &FxHashSet<Value>) let mut loop_values: FxHashSet<Value> = FxHashSet(); let mut invariant_insts: Vec<Inst> = Vec::new(); let mut pos = FuncCursor::new(func); - // We traverse the loop EBB in reverse post-order. - for ebb in postorder_ebbs_loop(loop_analysis, cfg, lp).iter().rev() { - // Arguments of the EBB are loop values - for val in pos.func.dfg.ebb_params(*ebb) { + // We traverse the loop blocks in reverse post-order. + for block in postorder_blocks_loop(loop_analysis, cfg, lp).iter().rev() { + // Arguments of the block are loop values + for val in pos.func.dfg.block_params(*block) { loop_values.insert(*val); } - pos.goto_top(*ebb); + pos.goto_top(*block); #[cfg_attr(feature = "cargo-clippy", allow(clippy::block_in_if_condition_stmt))] while let Some(inst) = pos.next_inst() { if is_loop_invariant(inst, &pos.func.dfg, &loop_values) { @@ -215,8 +215,12 @@ fn remove_loop_invariant_instructions( invariant_insts } -/// Return ebbs from a loop in post-order, starting from an entry point in the block. -fn postorder_ebbs_loop(loop_analysis: &LoopAnalysis, cfg: &ControlFlowGraph, lp: Loop) -> Vec<Ebb> { +/// Return blocks from a loop in post-order, starting from an entry point in the block.
+fn postorder_blocks_loop( + loop_analysis: &LoopAnalysis, + cfg: &ControlFlowGraph, + lp: Loop, +) -> Vec<Block> { let mut grey = FxHashSet(); let mut black = FxHashSet(); let mut stack = vec![loop_analysis.loop_header(lp)]; diff --git a/cranelift/codegen/src/loop_analysis.rs b/cranelift/codegen/src/loop_analysis.rs index 40de9afdff..dc659bc5f2 100644 --- a/cranelift/codegen/src/loop_analysis.rs +++ b/cranelift/codegen/src/loop_analysis.rs @@ -1,12 +1,12 @@ -//! A loop analysis represented as mappings of loops to their header Ebb +//! A loop analysis represented as mappings of loops to their header Block //! and parent in the loop tree. use crate::dominator_tree::DominatorTree; use crate::entity::entity_impl; use crate::entity::SecondaryMap; use crate::entity::{Keys, PrimaryMap}; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; -use crate::ir::{Ebb, Function, Layout}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; +use crate::ir::{Block, Function, Layout}; use crate::packed_option::PackedOption; use crate::timing; use alloc::vec::Vec; @@ -18,22 +18,22 @@ entity_impl!(Loop, "loop"); /// Loop tree information for a single function. /// -/// Loops are referenced by the Loop object, and for each loop you can access its header EBB, -/// its eventual parent in the loop tree and all the EBB belonging to the loop. +/// Loops are referenced by the Loop object, and for each loop you can access its header block, +/// its eventual parent in the loop tree and all the blocks belonging to the loop. pub struct LoopAnalysis { loops: PrimaryMap<Loop, LoopData>, - ebb_loop_map: SecondaryMap<Ebb, PackedOption<Loop>>, + block_loop_map: SecondaryMap<Block, PackedOption<Loop>>, valid: bool, } struct LoopData { - header: Ebb, + header: Block, parent: PackedOption<Loop>, } impl LoopData { /// Creates a `LoopData` object with the loop header and its eventual parent in the loop tree. - pub fn new(header: Ebb, parent: Option<Loop>) -> Self { + pub fn new(header: Block, parent: Option<Loop>) -> Self { Self { header, parent: parent.into(), @@ -49,7 +49,7 @@ impl LoopAnalysis { Self { valid: false, loops: PrimaryMap::new(), - ebb_loop_map: SecondaryMap::new(), + block_loop_map: SecondaryMap::new(), } } @@ -58,11 +58,11 @@ impl LoopAnalysis { self.loops.keys() } - /// Returns the header EBB of a particular loop. + /// Returns the header block of a particular loop. /// /// The characteristic property of a loop header block is that it dominates some of its /// predecessors. - pub fn loop_header(&self, lp: Loop) -> Ebb { + pub fn loop_header(&self, lp: Loop) -> Block { self.loops[lp].header } @@ -71,14 +71,14 @@ impl LoopAnalysis { self.loops[lp].parent.expand() } - /// Determine if an Ebb belongs to a loop by running a finger along the loop tree. + /// Determine if a Block belongs to a loop by running a finger along the loop tree. /// - /// Returns `true` if `ebb` is in loop `lp`. - pub fn is_in_loop(&self, ebb: Ebb, lp: Loop) -> bool { - let ebb_loop = self.ebb_loop_map[ebb]; - match ebb_loop.expand() { + /// Returns `true` if `block` is in loop `lp`.
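
The "finger along the loop tree" mentioned above is a walk up parent links from the block's innermost loop, as the renamed `is_in_loop` continuing below shows; here is a standalone sketch of the same walk, with plain `usize` indices standing in for the `Loop` and `Block` entities:

    // parent[l] is the enclosing loop of loop l; innermost[b] is the innermost
    // loop containing block b, if any.
    struct LoopTree {
        parent: Vec<Option<usize>>,
        innermost: Vec<Option<usize>>,
    }

    impl LoopTree {
        // True if the block's innermost loop is `lp` itself or a transitive
        // child of `lp` in the loop tree.
        fn is_in_loop(&self, block: usize, lp: usize) -> bool {
            let mut finger = match self.innermost[block] {
                Some(l) => l,
                None => return false,
            };
            loop {
                if finger == lp {
                    return true;
                }
                match self.parent[finger] {
                    Some(p) => finger = p,
                    None => return false,
                }
            }
        }
    }
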
+ pub fn is_in_loop(&self, block: Block, lp: Loop) -> bool { + let block_loop = self.block_loop_map[block]; + match block_loop.expand() { None => false, - Some(ebb_loop) => self.is_child_loop(ebb_loop, lp), + Some(block_loop) => self.is_child_loop(block_loop, lp), } } @@ -103,8 +103,8 @@ impl LoopAnalysis { pub fn compute(&mut self, func: &Function, cfg: &ControlFlowGraph, domtree: &DominatorTree) { let _tt = timing::loop_analysis(); self.loops.clear(); - self.ebb_loop_map.clear(); - self.ebb_loop_map.resize(func.dfg.num_ebbs()); + self.block_loop_map.clear(); + self.block_loop_map.resize(func.dfg.num_blocks()); self.find_loop_headers(cfg, domtree, &func.layout); self.discover_loop_blocks(cfg, domtree, &func.layout); self.valid = true; @@ -124,11 +124,11 @@ impl LoopAnalysis { /// memory be retained. pub fn clear(&mut self) { self.loops.clear(); - self.ebb_loop_map.clear(); + self.block_loop_map.clear(); self.valid = false; } - // Traverses the CFG in reverse postorder and create a loop object for every EBB having a + // Traverses the CFG in reverse postorder and creates a loop object for every block having a // back edge. fn find_loop_headers( &mut self, @@ -137,16 +137,16 @@ impl LoopAnalysis { layout: &Layout, ) { // We traverse the CFG in reverse postorder - for &ebb in domtree.cfg_postorder().iter().rev() { - for BasicBlock { + for &block in domtree.cfg_postorder().iter().rev() { + for BlockPredecessor { inst: pred_inst, .. - } in cfg.pred_iter(ebb) + } in cfg.pred_iter(block) { - // If the ebb dominates one of its predecessors it is a back edge - if domtree.dominates(ebb, pred_inst, layout) { - // This ebb is a loop header, so we create its associated loop - let lp = self.loops.push(LoopData::new(ebb, None)); - self.ebb_loop_map[ebb] = lp.into(); + // If the block dominates one of its predecessors it is a back edge + if domtree.dominates(block, pred_inst, layout) { + // This block is a loop header, so we create its associated loop + let lp = self.loops.push(LoopData::new(block, None)); + self.block_loop_map[block] = lp.into(); break; // We break because we only need one back edge to identify a loop header. } @@ -155,7 +155,7 @@ impl LoopAnalysis { } // Intended to be called after `find_loop_headers`. For each detected loop header, - // discovers all the ebb belonging to the loop and its inner loops. After a call to this + // discovers all the blocks belonging to the loop and its inner loops. After a call to this // function, the loop tree is fully constructed. fn discover_loop_blocks( &mut self, @@ -163,12 +163,12 @@ impl LoopAnalysis { domtree: &DominatorTree, layout: &Layout, ) { - let mut stack: Vec<Ebb> = Vec::new(); + let mut stack: Vec<Block> = Vec::new(); // We handle each loop header in reverse order, corresponding to a pseudo postorder // traversal of the graph.
for lp in self.loops().rev() { - for BasicBlock { - ebb: pred, + for BlockPredecessor { + block: pred, inst: pred_inst, } in cfg.pred_iter(self.loops[lp].header) { @@ -178,11 +178,11 @@ impl LoopAnalysis { } } while let Some(node) = stack.pop() { - let continue_dfs: Option<Ebb>; - match self.ebb_loop_map[node].expand() { + let continue_dfs: Option<Block>; + match self.block_loop_map[node].expand() { None => { // The node hasn't been visited yet, we tag it as part of the loop - self.ebb_loop_map[node] = PackedOption::from(lp); + self.block_loop_map[node] = PackedOption::from(lp); continue_dfs = Some(node); } Some(node_loop) => { @@ -221,7 +221,7 @@ impl LoopAnalysis { // Now we have handled the popped node and need to continue the DFS by adding the // predecessors of that node if let Some(continue_dfs) = continue_dfs { - for BasicBlock { ebb: pred, .. } in cfg.pred_iter(continue_dfs) { + for BlockPredecessor { block: pred, .. } in cfg.pred_iter(continue_dfs) { stack.push(pred) } } @@ -242,27 +242,27 @@ mod tests { #[test] fn nested_loops_detection() { let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); - let ebb3 = func.dfg.make_ebb(); - let cond = func.dfg.append_ebb_param(ebb0, types::I32); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); + let block3 = func.dfg.make_block(); + let cond = func.dfg.append_block_param(block0, types::I32); { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.ins().jump(ebb1, &[]); + cur.insert_block(block0); + cur.ins().jump(block1, &[]); - cur.insert_ebb(ebb1); - cur.ins().jump(ebb2, &[]); + cur.insert_block(block1); + cur.ins().jump(block2, &[]); - cur.insert_ebb(ebb2); - cur.ins().brnz(cond, ebb1, &[]); - cur.ins().jump(ebb3, &[]); + cur.insert_block(block2); + cur.ins().brnz(cond, block1, &[]); + cur.ins().jump(block3, &[]); - cur.insert_ebb(ebb3); - cur.ins().brnz(cond, ebb0, &[]); + cur.insert_block(block3); + cur.ins().brnz(cond, block0, &[]); } let mut loop_analysis = LoopAnalysis::new(); @@ -274,54 +274,54 @@ mod tests { let loops = loop_analysis.loops().collect::<Vec<Loop>>(); assert_eq!(loops.len(), 2); - assert_eq!(loop_analysis.loop_header(loops[0]), ebb0); - assert_eq!(loop_analysis.loop_header(loops[1]), ebb1); + assert_eq!(loop_analysis.loop_header(loops[0]), block0); + assert_eq!(loop_analysis.loop_header(loops[1]), block1); assert_eq!(loop_analysis.loop_parent(loops[1]), Some(loops[0])); assert_eq!(loop_analysis.loop_parent(loops[0]), None); - assert_eq!(loop_analysis.is_in_loop(ebb0, loops[0]), true); - assert_eq!(loop_analysis.is_in_loop(ebb0, loops[1]), false); - assert_eq!(loop_analysis.is_in_loop(ebb1, loops[1]), true); - assert_eq!(loop_analysis.is_in_loop(ebb1, loops[0]), true); - assert_eq!(loop_analysis.is_in_loop(ebb2, loops[1]), true); - assert_eq!(loop_analysis.is_in_loop(ebb2, loops[0]), true); - assert_eq!(loop_analysis.is_in_loop(ebb3, loops[0]), true); - assert_eq!(loop_analysis.is_in_loop(ebb0, loops[1]), false); + assert_eq!(loop_analysis.is_in_loop(block0, loops[0]), true); + assert_eq!(loop_analysis.is_in_loop(block0, loops[1]), false); + assert_eq!(loop_analysis.is_in_loop(block1, loops[1]), true); + assert_eq!(loop_analysis.is_in_loop(block1, loops[0]), true); + assert_eq!(loop_analysis.is_in_loop(block2, loops[1]), true); + assert_eq!(loop_analysis.is_in_loop(block2, loops[0]), true); + assert_eq!(loop_analysis.is_in_loop(block3, loops[0]), true); +
assert_eq!(loop_analysis.is_in_loop(block0, loops[1]), false); } #[test] fn complex_loop_detection() { let mut func = Function::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); - let ebb3 = func.dfg.make_ebb(); - let ebb4 = func.dfg.make_ebb(); - let ebb5 = func.dfg.make_ebb(); - let cond = func.dfg.append_ebb_param(ebb0, types::I32); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); + let block3 = func.dfg.make_block(); + let block4 = func.dfg.make_block(); + let block5 = func.dfg.make_block(); + let cond = func.dfg.append_block_param(block0, types::I32); { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.ins().brnz(cond, ebb1, &[]); - cur.ins().jump(ebb3, &[]); + cur.insert_block(block0); + cur.ins().brnz(cond, block1, &[]); + cur.ins().jump(block3, &[]); - cur.insert_ebb(ebb1); - cur.ins().jump(ebb2, &[]); + cur.insert_block(block1); + cur.ins().jump(block2, &[]); - cur.insert_ebb(ebb2); - cur.ins().brnz(cond, ebb1, &[]); - cur.ins().jump(ebb5, &[]); + cur.insert_block(block2); + cur.ins().brnz(cond, block1, &[]); + cur.ins().jump(block5, &[]); - cur.insert_ebb(ebb3); - cur.ins().jump(ebb4, &[]); + cur.insert_block(block3); + cur.ins().jump(block4, &[]); - cur.insert_ebb(ebb4); - cur.ins().brnz(cond, ebb3, &[]); - cur.ins().jump(ebb5, &[]); + cur.insert_block(block4); + cur.ins().brnz(cond, block3, &[]); + cur.ins().jump(block5, &[]); - cur.insert_ebb(ebb5); - cur.ins().brnz(cond, ebb0, &[]); + cur.insert_block(block5); + cur.ins().brnz(cond, block0, &[]); } let mut loop_analysis = LoopAnalysis::new(); @@ -333,17 +333,17 @@ mod tests { let loops = loop_analysis.loops().collect::<Vec<Loop>>(); assert_eq!(loops.len(), 3); - assert_eq!(loop_analysis.loop_header(loops[0]), ebb0); - assert_eq!(loop_analysis.loop_header(loops[1]), ebb1); - assert_eq!(loop_analysis.loop_header(loops[2]), ebb3); + assert_eq!(loop_analysis.loop_header(loops[0]), block0); + assert_eq!(loop_analysis.loop_header(loops[1]), block1); + assert_eq!(loop_analysis.loop_header(loops[2]), block3); assert_eq!(loop_analysis.loop_parent(loops[1]), Some(loops[0])); assert_eq!(loop_analysis.loop_parent(loops[2]), Some(loops[0])); assert_eq!(loop_analysis.loop_parent(loops[0]), None); - assert_eq!(loop_analysis.is_in_loop(ebb0, loops[0]), true); - assert_eq!(loop_analysis.is_in_loop(ebb1, loops[1]), true); - assert_eq!(loop_analysis.is_in_loop(ebb2, loops[1]), true); - assert_eq!(loop_analysis.is_in_loop(ebb3, loops[2]), true); - assert_eq!(loop_analysis.is_in_loop(ebb4, loops[2]), true); - assert_eq!(loop_analysis.is_in_loop(ebb5, loops[0]), true); + assert_eq!(loop_analysis.is_in_loop(block0, loops[0]), true); + assert_eq!(loop_analysis.is_in_loop(block1, loops[1]), true); + assert_eq!(loop_analysis.is_in_loop(block2, loops[1]), true); + assert_eq!(loop_analysis.is_in_loop(block3, loops[2]), true); + assert_eq!(loop_analysis.is_in_loop(block4, loops[2]), true); + assert_eq!(loop_analysis.is_in_loop(block5, loops[0]), true); } } diff --git a/cranelift/codegen/src/nan_canonicalization.rs b/cranelift/codegen/src/nan_canonicalization.rs index 235203b764..e7c0e53419 100644 --- a/cranelift/codegen/src/nan_canonicalization.rs +++ b/cranelift/codegen/src/nan_canonicalization.rs @@ -18,7 +18,7 @@ static CANON_64BIT_NAN: u64 = 0b011111111111100000000000000000000000000000000000 pub fn do_nan_canonicalization(func: &mut Function) { let _tt = timing::canonicalize_nans(); let mut pos =
FuncCursor::new(func); - while let Some(_ebb) = pos.next_ebb() { + while let Some(_block) = pos.next_block() { while let Some(inst) = pos.next_inst() { if is_fp_arith(&mut pos, inst) { add_nan_canon_seq(&mut pos, inst); @@ -59,7 +59,7 @@ fn add_nan_canon_seq(pos: &mut FuncCursor, inst: Inst) { let val = pos.func.dfg.first_result(inst); let val_type = pos.func.dfg.value_type(val); let new_res = pos.func.dfg.replace_result(val, val_type); - let _next_inst = pos.next_inst().expect("EBB missing terminator!"); + let _next_inst = pos.next_inst().expect("block missing terminator!"); // Insert a comparison instruction, to check if `inst_res` is NaN. Select // the canonical NaN value if `val` is NaN, assign the result to `inst`. diff --git a/cranelift/codegen/src/postopt.rs b/cranelift/codegen/src/postopt.rs index ef027f6f25..42121817d5 100644 --- a/cranelift/codegen/src/postopt.rs +++ b/cranelift/codegen/src/postopt.rs @@ -7,7 +7,7 @@ use crate::ir::condcodes::{CondCode, FloatCC, IntCC}; use crate::ir::dfg::ValueDef; use crate::ir::immediates::{Imm64, Offset32}; use crate::ir::instructions::{Opcode, ValueList}; -use crate::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, MemFlags, Type, Value}; +use crate::ir::{Block, Function, Inst, InstBuilder, InstructionData, MemFlags, Type, Value}; use crate::isa::TargetIsa; use crate::timing; @@ -18,7 +18,7 @@ struct CmpBrInfo { /// The icmp, icmp_imm, or fcmp instruction. cmp_inst: Inst, /// The destination of the branch. - destination: Ebb, + destination: Block, /// The arguments of the branch. args: ValueList, /// The first argument to the comparison. The second is in the `kind` field. @@ -360,7 +360,7 @@ fn optimize_complex_addresses(pos: &mut EncCursor, inst: Inst, isa: &dyn TargetI pub fn do_postopt(func: &mut Function, isa: &dyn TargetIsa) { let _tt = timing::postopt(); let mut pos = EncCursor::new(func, isa); - while let Some(_ebb) = pos.next_ebb() { + while let Some(_block) = pos.next_block() { let mut last_flags_clobber = None; while let Some(inst) = pos.next_inst() { if isa.uses_cpu_flags() { diff --git a/cranelift/codegen/src/print_errors.rs b/cranelift/codegen/src/print_errors.rs index 78ae325630..e4f6234ebd 100644 --- a/cranelift/codegen/src/print_errors.rs +++ b/cranelift/codegen/src/print_errors.rs @@ -2,7 +2,7 @@ use crate::entity::SecondaryMap; use crate::ir; -use crate::ir::entities::{AnyEntity, Ebb, Inst, Value}; +use crate::ir::entities::{AnyEntity, Block, Inst, Value}; use crate::ir::function::Function; use crate::isa::TargetIsa; use crate::result::CodegenError; @@ -47,15 +47,15 @@ pub fn pretty_verifier_error<'a>( struct PrettyVerifierError<'a>(Box<dyn FuncWriter + 'a>, &'a mut Vec<VerifierError>); impl<'a> FuncWriter for PrettyVerifierError<'a> { - fn write_ebb_header( + fn write_block_header( &mut self, w: &mut dyn Write, func: &Function, isa: Option<&dyn TargetIsa>, - ebb: Ebb, + block: Block, indent: usize, ) -> fmt::Result { - pretty_ebb_header_error(w, func, isa, ebb, indent, &mut *self.0, self.1) + pretty_block_header_error(w, func, isa, block, indent, &mut *self.0, self.1) } fn write_instruction( @@ -81,18 +81,18 @@ impl<'a> FuncWriter for PrettyVerifierError<'a> { } } -/// Pretty-print a function verifier error for a given EBB. +/// Pretty-print a function verifier error for a given block.
+fn pretty_block_header_error( w: &mut dyn Write, func: &Function, isa: Option<&dyn TargetIsa>, - cur_ebb: Ebb, + cur_block: Block, indent: usize, func_w: &mut dyn FuncWriter, errors: &mut Vec<VerifierError>, ) -> fmt::Result { let mut s = String::new(); - func_w.write_ebb_header(&mut s, func, isa, cur_ebb, indent)?; + func_w.write_block_header(&mut s, func, isa, cur_block, indent)?; write!(w, "{}", s)?; // TODO: Use drain_filter here when it gets stabilized @@ -100,7 +100,7 @@ fn pretty_ebb_header_error( let mut printed_error = false; while i != errors.len() { match errors[i].location { - ir::entities::AnyEntity::Ebb(ebb) if ebb == cur_ebb => { + ir::entities::AnyEntity::Block(block) if block == cur_block => { if !printed_error { print_arrow(w, &s)?; printed_error = true; diff --git a/cranelift/codegen/src/redundant_reload_remover.rs b/cranelift/codegen/src/redundant_reload_remover.rs index 207cec2286..f33eb98fde 100644 --- a/cranelift/codegen/src/redundant_reload_remover.rs +++ b/cranelift/codegen/src/redundant_reload_remover.rs @@ -8,7 +8,8 @@ use crate::ir::dfg::DataFlowGraph; use crate::ir::instructions::BranchInfo; use crate::ir::stackslot::{StackSlotKind, StackSlots}; use crate::ir::{ - Ebb, Function, Inst, InstBuilder, InstructionData, Opcode, StackSlotData, Type, Value, ValueLoc, + Block, Function, Inst, InstBuilder, InstructionData, Opcode, StackSlotData, Type, Value, + ValueLoc, }; use crate::isa::{RegInfo, RegUnit, TargetIsa}; use crate::regalloc::RegDiversions; @@ -20,7 +21,7 @@ use cranelift_entity::{PrimaryMap, SecondaryMap}; // A description of the redundant-fill-removal algorithm // // -// The algorithm works forwards through each Ebb. It carries along and updates a table, +// The algorithm works forwards through each Block. It carries along and updates a table, // AvailEnv, with which it tracks registers that are known to have the same value as some stack // slot. The actions on encountering an instruction depend on the instruction, as follows: // @@ -68,19 +69,19 @@ use cranelift_entity::{PrimaryMap, SecondaryMap}; // // The overall algorithm, for a function, starts like this: // -// * (once per function): finds Ebbs that have two or more predecessors, since they will be the -// roots of Ebb trees. Also, the entry node for the function is considered to be a root. +// * (once per function): finds Blocks that have two or more predecessors, since they will be the +// roots of Block trees. Also, the entry node for the function is considered to be a root. // -// It then continues with a loop that first finds a tree of Ebbs ("discovery") and then removes +// It then continues with a loop that first finds a tree of Blocks ("discovery") and then removes // redundant fills as described above ("processing"): // -// * (discovery; once per tree): for each root, performs a depth first search to find all the Ebbs +// * (discovery; once per tree): for each root, performs a depth first search to find all the Blocks // in the tree, guided by RedundantReloadRemover::discovery_stack. // // * (processing; once per tree): the just-discovered tree is then processed as described above, // guided by RedundantReloadRemover::processing_stack. // -// In this way, all Ebbs reachable from the function's entry point are eventually processed. Note +// In this way, all Blocks reachable from the function's entry point are eventually processed. Note // that each tree is processed as soon as it has been discovered, so the algorithm never creates a // list of trees for the function.
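
The root rule above fits in a few lines; this standalone sketch (plain `usize` indices rather than the cranelift entities) classifies predecessor counts the way the discovery phase does:

    #[derive(Clone, Copy, PartialEq)]
    enum ZeroOneOrMany {
        Zero,
        One,
        Many,
    }

    // Summarise a predecessor count the way the pass does: beyond two
    // predecessors we stop caring exactly how many there are.
    fn classify(num_preds: usize) -> ZeroOneOrMany {
        match num_preds {
            0 => ZeroOneOrMany::Zero,
            1 => ZeroOneOrMany::One,
            _ => ZeroOneOrMany::Many,
        }
    }

    // A block roots a tree if it is the function entry or a merge point; the
    // entry is claimed as "many" so that it is always a root.
    fn is_tree_root(block: usize, entry: usize, num_preds: &[usize]) -> bool {
        block == entry || classify(num_preds[block]) == ZeroOneOrMany::Many
    }
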
// @@ -88,7 +89,7 @@ use cranelift_entity::{PrimaryMap, SecondaryMap}; // reused for multiple functions so as to minimise heap turnover. The fields are, roughly: // // num_regunits -- constant for the whole function; used by the tree processing phase -// num_preds_per_ebb -- constant for the whole function; used by the tree discovery process +// num_preds_per_block -- constant for the whole function; used by the tree discovery process // // discovery_stack -- used to guide the tree discovery process // nodes_in_tree -- the discovered nodes are recorded here @@ -121,8 +122,8 @@ use cranelift_entity::{PrimaryMap, SecondaryMap}; // ============================================================================================= // Data structures used for discovery of trees -// `ZeroOneOrMany` is used to record the number of predecessors an Ebb block has. The `Zero` case -// is included so as to cleanly handle the case where the incoming graph has unreachable Ebbs. +// `ZeroOneOrMany` is used to record the number of predecessors a Block has. The `Zero` case +// is included so as to cleanly handle the case where the incoming graph has unreachable Blocks. #[derive(Clone, PartialEq)] enum ZeroOneOrMany { @@ -183,23 +184,23 @@ struct AvailEnv { } // `ProcessingStackElem` combines AvailEnv with contextual information needed to "navigate" within -// an Ebb. +// a Block. // -// A ProcessingStackElem conceptually has the lifetime of exactly one Ebb: once the current Ebb is +// A ProcessingStackElem conceptually has the lifetime of exactly one Block: once the current Block is // completed, the ProcessingStackElem will be abandoned. In practice the top level state, // RedundantReloadRemover, caches them, so as to avoid heap turnover. // // Note that ProcessingStackElem must contain a CursorPosition. The CursorPosition, which -// indicates where we are in the current Ebb, cannot be implicitly maintained by looping over all -// the instructions in an Ebb in turn, because we may choose to suspend processing the current Ebb +// indicates where we are in the current Block, cannot be implicitly maintained by looping over all +// the instructions in a Block in turn, because we may choose to suspend processing the current Block // at a side exit, continue by processing the subtree reached via the side exit, and only later -// resume the current Ebb. +// resume the current Block. struct ProcessingStackElem { - /// Indicates the AvailEnv at the current point in the Ebb. + /// Indicates the AvailEnv at the current point in the Block. avail_env: AvailEnv, - /// Shows where we currently are inside the Ebb. + /// Shows where we currently are inside the Block. cursor: CursorPosition, /// Indicates the currently active register diversions at the current point. @@ -212,7 +213,7 @@ struct ProcessingStackElem { // `RedundantReloadRemover` contains data structures for the two passes: discovery of tree shaped // regions, and processing of them. These are allocated once and stay alive for the entire // function, even though they are cleared out for each new tree shaped region. It also caches -// `num_regunits` and `num_preds_per_ebb`, which are computed at the start of each function and +// `num_regunits` and `num_preds_per_block`, which are computed at the start of each function and // then remain constant. /// The redundant reload remover's state. @@ -222,22 +223,22 @@ pub struct RedundantReloadRemover { /// function.
num_regunits: Option<u16>, - /// This stores, for each Ebb, a characterisation of the number of predecessors it has. - num_preds_per_ebb: PrimaryMap<Ebb, ZeroOneOrMany>, + /// This stores, for each Block, a characterisation of the number of predecessors it has. + num_preds_per_block: PrimaryMap<Block, ZeroOneOrMany>, /// The stack used for the first phase (discovery). There is one element on the discovery - /// stack for each currently unexplored Ebb in the tree being searched. - discovery_stack: Vec<Ebb>, + /// stack for each currently unexplored Block in the tree being searched. + discovery_stack: Vec<Block>, /// The nodes in the discovered tree are inserted here. - nodes_in_tree: EntitySet<Ebb>, + nodes_in_tree: EntitySet<Block>, /// The stack used during the second phase (transformation). There is one element on the /// processing stack for each currently-open node in the tree being transformed. processing_stack: Vec<ProcessingStackElem>, /// Used in the second phase to avoid visiting nodes more than once. - nodes_already_visited: EntitySet<Ebb>, + nodes_already_visited: EntitySet<Block>, } // ============================================================================================= @@ -301,17 +302,17 @@ fn slot_of_value<'s>( impl RedundantReloadRemover { // A helper for `add_nodes_to_tree` below. - fn discovery_stack_push_successors_of(&mut self, cfg: &ControlFlowGraph, node: Ebb) { + fn discovery_stack_push_successors_of(&mut self, cfg: &ControlFlowGraph, node: Block) { for successor in cfg.succ_iter(node) { self.discovery_stack.push(successor); } } - // Visit the tree of Ebbs rooted at `starting_point` and add them to `self.nodes_in_tree`. - // `self.num_preds_per_ebb` guides the process, ensuring we don't leave the tree-ish region + // Visit the tree of Blocks rooted at `starting_point` and add them to `self.nodes_in_tree`. + // `self.num_preds_per_block` guides the process, ensuring we don't leave the tree-ish region // and indirectly ensuring that the process will terminate in the presence of cycles in the // graph. `self.discovery_stack` holds the search state in this function. - fn add_nodes_to_tree(&mut self, cfg: &ControlFlowGraph, starting_point: Ebb) { + fn add_nodes_to_tree(&mut self, cfg: &ControlFlowGraph, starting_point: Block) { // One might well ask why this doesn't loop forever when it encounters cycles in the // control flow graph. The reason is that any cycle in the graph that is reachable from // anywhere outside the cycle -- in particular, that is reachable from the function's @@ -325,7 +326,7 @@ impl RedundantReloadRemover { self.discovery_stack_push_successors_of(cfg, starting_point); while let Some(node) = self.discovery_stack.pop() { - match self.num_preds_per_ebb[node] { + match self.num_preds_per_block[node] { // We arrived at a node with multiple predecessors, so it's a new root. Ignore it. ZeroOneOrMany::Many => {} // This node has just one predecessor, so we should incorporate it in the tree and @@ -652,8 +653,8 @@ impl RedundantReloadRemover { impl RedundantReloadRemover { // Push a clone of the top-of-stack ProcessingStackElem. This will be used to process exactly - // one Ebb. The diversions are created new, rather than cloned, to reflect the fact - // that diversions are local to each Ebb. + // one Block. The diversions are created new, rather than cloned, to reflect the fact + // that diversions are local to each Block.
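
The inherit-versus-reset split in that comment (availability facts flow down the tree, diversions start fresh for each block) can be sketched with simplified stand-ins for AvailEnv and RegDiversions:

    // Simplified stand-ins, for illustration only.
    #[derive(Clone, Default)]
    struct AvailEnvSketch {
        facts: Vec<(u16, u32)>, // (register unit, stack slot) equivalences
    }

    #[derive(Default)]
    struct DiversionsSketch {
        moves: Vec<(u16, u16)>, // block-local register moves
    }

    struct StackElemSketch {
        avail_env: AvailEnvSketch,
        diversions: DiversionsSketch,
    }

    // Push a work item for one block: the AvailEnv is cloned from the parent
    // element, while the diversions are block-local and therefore start empty.
    fn push_block(stack: &mut Vec<StackElemSketch>) {
        let avail_env = match stack.last() {
            Some(top) => top.avail_env.clone(),
            None => AvailEnvSketch::default(),
        };
        stack.push(StackElemSketch {
            avail_env,
            diversions: DiversionsSketch::default(), // never inherited
        });
    }
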
fn processing_stack_push(&mut self, cursor: CursorPosition) { let avail_env = if let Some(stack_top) = self.processing_stack.last() { stack_top.avail_env.clone() @@ -674,7 +675,7 @@ impl RedundantReloadRemover { // This pushes the node `dst` onto the processing stack, and sets up the new // ProcessingStackElem accordingly. But it does all that only if `dst` is part of the current // tree *and* we haven't yet visited it. - fn processing_stack_maybe_push(&mut self, dst: Ebb) { + fn processing_stack_maybe_push(&mut self, dst: Block) { if self.nodes_in_tree.contains(dst) && !self.nodes_already_visited.contains(dst) { if !self.processing_stack.is_empty() { // If this isn't the outermost node in the tree (that is, the root), then it must @@ -682,7 +683,7 @@ impl RedundantReloadRemover { // incorporated in any tree. Nodes with two or more predecessors are the root of // some other tree, and visiting them as if they were part of the current tree // would be a serious error. - debug_assert!(self.num_preds_per_ebb[dst] == ZeroOneOrMany::One); + debug_assert!(self.num_preds_per_block[dst] == ZeroOneOrMany::One); } self.processing_stack_push(CursorPosition::Before(dst)); self.nodes_already_visited.insert(dst); @@ -697,7 +698,7 @@ impl RedundantReloadRemover { func: &mut Function, reginfo: &RegInfo, isa: &dyn TargetIsa, - root: Ebb, + root: Block, ) { debug_assert!(self.nodes_in_tree.contains(root)); debug_assert!(self.processing_stack.is_empty()); @@ -728,10 +729,10 @@ impl RedundantReloadRemover { // Update diversions after the insn. self.processing_stack[tos].diversions.apply(&func.dfg[inst]); - // If the insn can branch outside this Ebb, push work items on the stack for all - // target Ebbs that are part of the same tree and that we haven't yet visited. + // If the insn can branch outside this Block, push work items on the stack for all + // target Blocks that are part of the same tree and that we haven't yet visited. // The next iteration of this instruction-processing loop will immediately start - // work on the most recently pushed Ebb, and will eventually continue in this Ebb + // work on the most recently pushed Block, and will eventually continue in this Block // when those new items have been removed from the stack. match func.dfg.analyze_branch(inst) { BranchInfo::NotABranch => (), @@ -748,7 +749,7 @@ impl RedundantReloadRemover { } } } else { - // We've come to the end of the current work-item (Ebb). We'll already have + // We've come to the end of the current work-item (Block). We'll already have // processed the fallthrough/continuation/whatever for it using the logic above. // Pop it off the stack and resume work on its parent. 
self.processing_stack.pop(); } } } @@ -765,11 +766,11 @@ impl RedundantReloadRemover { pub fn new() -> Self { Self { num_regunits: None, - num_preds_per_ebb: PrimaryMap::<Ebb, ZeroOneOrMany>::with_capacity(8), - discovery_stack: Vec::<Ebb>::with_capacity(16), - nodes_in_tree: EntitySet::<Ebb>::new(), + num_preds_per_block: PrimaryMap::<Block, ZeroOneOrMany>::with_capacity(8), + discovery_stack: Vec::<Block>::with_capacity(16), + nodes_in_tree: EntitySet::<Block>::new(), processing_stack: Vec::<ProcessingStackElem>::with_capacity(8), - nodes_already_visited: EntitySet::<Ebb>::new(), + nodes_already_visited: EntitySet::<Block>::new(), } } @@ -779,7 +780,7 @@ impl RedundantReloadRemover { } fn clear_for_new_function(&mut self) { - self.num_preds_per_ebb.clear(); + self.num_preds_per_block.clear(); self.clear_for_new_tree(); } @@ -798,19 +799,19 @@ impl RedundantReloadRemover { isa: &dyn TargetIsa, cfg: &ControlFlowGraph, ) { - // Fail in an obvious way if there are more than (2^32)-1 Ebbs in this function. - let num_ebbs: u32 = func.dfg.num_ebbs().try_into().unwrap(); + // Fail in an obvious way if there are more than (2^32)-1 Blocks in this function. + let num_blocks: u32 = func.dfg.num_blocks().try_into().unwrap(); // Clear out per-tree state. self.clear_for_new_function(); // Create a PrimaryMap that summarises the number of predecessors for each block, as 0, 1 // or "many", and that also claims the entry block as having "many" predecessors. - self.num_preds_per_ebb.clear(); - self.num_preds_per_ebb.reserve(num_ebbs as usize); + self.num_preds_per_block.clear(); + self.num_preds_per_block.reserve(num_blocks as usize); - for i in 0..num_ebbs { - let mut pi = cfg.pred_iter(Ebb::from_u32(i)); + for i in 0..num_blocks { + let mut pi = cfg.pred_iter(Block::from_u32(i)); let mut n_pi = ZeroOneOrMany::Zero; if pi.next().is_some() { n_pi = ZeroOneOrMany::One; @@ -819,24 +820,24 @@ impl RedundantReloadRemover { // We don't care if there are more than two preds, so stop counting now. } } - self.num_preds_per_ebb.push(n_pi); + self.num_preds_per_block.push(n_pi); } - debug_assert!(self.num_preds_per_ebb.len() == num_ebbs as usize); + debug_assert!(self.num_preds_per_block.len() == num_blocks as usize); // The entry block must be the root of some tree, so set up the state to reflect that. - let entry_ebb = func + let entry_block = func .layout .entry_block() - .expect("do_redundant_fill_removal_on_function: entry ebb unknown"); - debug_assert!(self.num_preds_per_ebb[entry_ebb] == ZeroOneOrMany::Zero); - self.num_preds_per_ebb[entry_ebb] = ZeroOneOrMany::Many; + .expect("do_redundant_fill_removal_on_function: entry block unknown"); + debug_assert!(self.num_preds_per_block[entry_block] == ZeroOneOrMany::Zero); + self.num_preds_per_block[entry_block] = ZeroOneOrMany::Many; // Now build and process trees. - for root_ix in 0..self.num_preds_per_ebb.len() { - let root = Ebb::from_u32(root_ix as u32); + for root_ix in 0..self.num_preds_per_block.len() { + let root = Block::from_u32(root_ix as u32); // Build a tree for each node that has two or more preds, and ignore all other nodes. - if self.num_preds_per_ebb[root] != ZeroOneOrMany::Many { + if self.num_preds_per_block[root] != ZeroOneOrMany::Many { continue; } @@ -846,7 +847,7 @@ impl RedundantReloadRemover { // Discovery phase: build the tree, as `root` and `self.nodes_in_tree`. self.add_nodes_to_tree(cfg, root); debug_assert!(self.nodes_in_tree.cardinality() > 0); - debug_assert!(self.num_preds_per_ebb[root] == ZeroOneOrMany::Many); + debug_assert!(self.num_preds_per_block[root] == ZeroOneOrMany::Many); // Processing phase: do redundant-reload-removal.
self.process_tree(func, reginfo, isa, root); diff --git a/cranelift/codegen/src/regalloc/branch_splitting.rs b/cranelift/codegen/src/regalloc/branch_splitting.rs index 35291e5213..4e9a159f3e 100644 --- a/cranelift/codegen/src/regalloc/branch_splitting.rs +++ b/cranelift/codegen/src/regalloc/branch_splitting.rs @@ -7,7 +7,7 @@ use alloc::vec::Vec; use crate::cursor::{Cursor, EncCursor}; use crate::dominator_tree::DominatorTree; use crate::flowgraph::ControlFlowGraph; -use crate::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList}; +use crate::ir::{Block, Function, Inst, InstBuilder, InstructionData, Opcode, ValueList}; use crate::isa::TargetIsa; use crate::topo_order::TopoOrder; @@ -43,12 +43,12 @@ struct Context<'a> { impl<'a> Context<'a> { fn run(&mut self) { - // Any ebb order will do. - self.topo.reset(self.cur.func.layout.ebbs()); - while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) { + // Any block order will do. + self.topo.reset(self.cur.func.layout.blocks()); + while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) { // Branches can only be at the last or second to last position in an extended basic // block. - self.cur.goto_last_inst(ebb); + self.cur.goto_last_inst(block); let terminator_inst = self.cur.current_inst().expect("terminator"); if let Some(inst) = self.cur.prev_inst() { let opcode = self.cur.func.dfg[inst].opcode(); @@ -80,38 +80,38 @@ impl<'a> Context<'a> { // If there are any parameters, split the edge. if self.should_split_edge(target) { // Create the block the branch will jump to. - let new_ebb = self.cur.func.dfg.make_ebb(); + let new_block = self.cur.func.dfg.make_block(); // Insert the new block before the destination, such that it can fallthrough in the // target block. assert_ne!(Some(target), self.cur.layout().entry_block()); - self.cur.layout_mut().insert_ebb(new_ebb, target); + self.cur.layout_mut().insert_block(new_block, target); self.has_new_blocks = true; - // Extract the arguments of the branch instruction, split the Ebb parameters and the + // Extract the arguments of the branch instruction, split the Block parameters and the // branch arguments let num_fixed = opcode.constraints().num_fixed_value_arguments(); let dfg = &mut self.cur.func.dfg; let old_args: Vec<_> = { - let args = dfg[branch].take_value_list().expect("ebb parameters"); + let args = dfg[branch].take_value_list().expect("block parameters"); args.as_slice(&dfg.value_lists).iter().copied().collect() }; - let (branch_args, ebb_params) = old_args.split_at(num_fixed); + let (branch_args, block_params) = old_args.split_at(num_fixed); - // Replace the branch destination by the new Ebb created with no parameters, and restore - // the branch arguments, without the original Ebb parameters. + // Replace the branch destination by the new Block created with no parameters, and restore + // the branch arguments, without the original Block parameters. { let branch_args = ValueList::from_slice(branch_args, &mut dfg.value_lists); let data = &mut dfg[branch]; - *data.branch_destination_mut().expect("branch") = new_ebb; + *data.branch_destination_mut().expect("branch") = new_block; data.put_value_list(branch_args); } let ok = self.cur.func.update_encoding(branch, self.cur.isa).is_ok(); debug_assert!(ok); // Insert a jump to the original target with its arguments into the new block. 
- self.cur.goto_first_insertion_point(new_ebb); - self.cur.ins().jump(target, ebb_params); + self.cur.goto_first_insertion_point(new_block); + self.cur.ins().jump(target, block_params); // Reset the cursor to point to the branch. self.cur.goto_inst(branch); @@ -122,7 +122,7 @@ impl<'a> Context<'a> { let inst_data = &self.cur.func.dfg[inst]; let opcode = inst_data.opcode(); if opcode != Opcode::Jump && opcode != Opcode::Fallthrough { - // This opcode is ignored as it does not have any EBB parameters. + // This opcode is ignored as it does not have any block parameters. if opcode != Opcode::IndirectJumpTableBr { debug_assert!(!opcode.is_branch()) } @@ -141,23 +141,23 @@ impl<'a> Context<'a> { // If there are any parameters, split the edge. if self.should_split_edge(*target) { // Create the block the branch will jump to. - let new_ebb = self.cur.func.dfg.make_ebb(); + let new_block = self.cur.func.dfg.make_block(); self.has_new_blocks = true; // Split the current block before its terminator, and insert a new jump instruction to // jump to it. - let jump = self.cur.ins().jump(new_ebb, &[]); - self.cur.insert_ebb(new_ebb); + let jump = self.cur.ins().jump(new_block, &[]); + self.cur.insert_block(new_block); - // Reset the cursor to point to new terminator of the old ebb. + // Reset the cursor to point to the new terminator of the old block. self.cur.goto_inst(jump); } } /// Returns whether we should introduce a new branch. - fn should_split_edge(&self, target: Ebb) -> bool { + fn should_split_edge(&self, target: Block) -> bool { // We should split the edge if the target has any parameters. - if !self.cur.func.dfg.ebb_params(target).is_empty() { + if !self.cur.func.dfg.block_params(target).is_empty() { return true; }; diff --git a/cranelift/codegen/src/regalloc/coalescing.rs b/cranelift/codegen/src/regalloc/coalescing.rs index c408b912fa..4067a950cf 100644 --- a/cranelift/codegen/src/regalloc/coalescing.rs +++ b/cranelift/codegen/src/regalloc/coalescing.rs @@ -2,16 +2,16 @@ //! //! Conventional SSA (CSSA) form is a subset of SSA form where any (transitively) phi-related //! values do not interfere. We construct CSSA by building virtual registers that are as large as -//! possible and inserting copies where necessary such that all argument values passed to an EBB -//! parameter will belong to the same virtual register as the EBB parameter value itself. +//! possible and inserting copies where necessary such that all argument values passed to a block +//! parameter will belong to the same virtual register as the block parameter value itself. use crate::cursor::{Cursor, EncCursor}; use crate::dbg::DisplayList; use crate::dominator_tree::{DominatorTree, DominatorTreePreorder}; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; use crate::fx::FxHashMap; use crate::ir::{self, InstBuilder, ProgramOrder}; -use crate::ir::{Ebb, ExpandedProgramPoint, Function, Inst, Value}; +use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Value}; use crate::isa::{EncInfo, TargetIsa}; use crate::regalloc::affinity::Affinity; use crate::regalloc::liveness::Liveness; @@ -40,8 +40,8 @@ use log::debug; // // Phase 1: Union-find.
// -// We use the union-find support in `VirtRegs` to build virtual registers such that EBB parameter -// values always belong to the same virtual register as their corresponding EBB arguments at the +// We use the union-find support in `VirtRegs` to build virtual registers such that block parameter +// values always belong to the same virtual register as their corresponding block arguments at the // predecessor branches. Trivial interferences between parameter and argument value live ranges are // detected and resolved before unioning congruence classes, but non-trivial interferences between // values that end up in the same congruence class are possible. @@ -135,8 +135,8 @@ impl Coalescing { }; // Run phase 1 (union-find) of the coalescing algorithm on the current function. - for &ebb in domtree.cfg_postorder() { - context.union_find_ebb(ebb); + for &block in domtree.cfg_postorder() { + context.union_find_block(block); } context.finish_union_find(); @@ -147,114 +147,114 @@ impl Coalescing { /// Phase 1: Union-find. /// -/// The two entry points for phase 1 are `union_find_ebb()` and `finish_union_find`. +/// The two entry points for phase 1 are `union_find_block()` and `finish_union_find`. impl<'a> Context<'a> { - /// Run the union-find algorithm on the parameter values on `ebb`. + /// Run the union-find algorithm on the parameter values on `block`. /// - /// This ensure that all EBB parameters will belong to the same virtual register as their + /// This ensure that all block parameters will belong to the same virtual register as their /// corresponding arguments at all predecessor branches. - pub fn union_find_ebb(&mut self, ebb: Ebb) { - let num_params = self.func.dfg.num_ebb_params(ebb); + pub fn union_find_block(&mut self, block: Block) { + let num_params = self.func.dfg.num_block_params(block); if num_params == 0 { return; } - self.isolate_conflicting_params(ebb, num_params); + self.isolate_conflicting_params(block, num_params); for i in 0..num_params { - self.union_pred_args(ebb, i); + self.union_pred_args(block, i); } } - // Identify EBB parameter values that are live at one of the predecessor branches. + // Identify block parameter values that are live at one of the predecessor branches. // // Such a parameter value will conflict with any argument value at the predecessor branch, so // it must be isolated by inserting a copy. - fn isolate_conflicting_params(&mut self, ebb: Ebb, num_params: usize) { - debug_assert_eq!(num_params, self.func.dfg.num_ebb_params(ebb)); - // The only way a parameter value can interfere with a predecessor branch is if the EBB is + fn isolate_conflicting_params(&mut self, block: Block, num_params: usize) { + debug_assert_eq!(num_params, self.func.dfg.num_block_params(block)); + // The only way a parameter value can interfere with a predecessor branch is if the block is // dominating the predecessor branch. That is, we are looking for loop back-edges. - for BasicBlock { - ebb: pred_ebb, + for BlockPredecessor { + block: pred_block, inst: pred_inst, - } in self.cfg.pred_iter(ebb) + } in self.cfg.pred_iter(block) { - // The quick pre-order dominance check is accurate because the EBB parameter is defined - // at the top of the EBB before any branches. - if !self.preorder.dominates(ebb, pred_ebb) { + // The quick pre-order dominance check is accurate because the block parameter is defined + // at the top of the block before any branches. 
+ if !self.preorder.dominates(block, pred_block) { continue; } debug!( " - checking {} params at back-edge {}: {}", num_params, - pred_ebb, + pred_block, self.func.dfg.display_inst(pred_inst, self.isa) ); // Now `pred_inst` is known to be a back-edge, so it is possible for parameter values // to be live at the use. for i in 0..num_params { - let param = self.func.dfg.ebb_params(ebb)[i]; - if self.liveness[param].reaches_use(pred_inst, pred_ebb, &self.func.layout) { - self.isolate_param(ebb, param); + let param = self.func.dfg.block_params(block)[i]; + if self.liveness[param].reaches_use(pred_inst, pred_block, &self.func.layout) { + self.isolate_param(block, param); } } } } - // Union EBB parameter value `num` with the corresponding EBB arguments on the predecessor + // Union block parameter value `num` with the corresponding block arguments on the predecessor // branches. // - // Detect cases where the argument value is live-in to `ebb` so it conflicts with any EBB + // Detect cases where the argument value is live-in to `block` so it conflicts with any block // parameter. Isolate the argument in those cases before unioning it with the parameter value. - fn union_pred_args(&mut self, ebb: Ebb, argnum: usize) { - let param = self.func.dfg.ebb_params(ebb)[argnum]; + fn union_pred_args(&mut self, block: Block, argnum: usize) { + let param = self.func.dfg.block_params(block)[argnum]; - for BasicBlock { - ebb: pred_ebb, + for BlockPredecessor { + block: pred_block, inst: pred_inst, - } in self.cfg.pred_iter(ebb) + } in self.cfg.pred_iter(block) { let arg = self.func.dfg.inst_variable_args(pred_inst)[argnum]; // Never coalesce incoming function parameters on the stack. These parameters are // pre-spilled, and the rest of the virtual register would be forced to spill to the // `incoming_arg` stack slot too. - if let ir::ValueDef::Param(def_ebb, def_num) = self.func.dfg.value_def(arg) { - if Some(def_ebb) == self.func.layout.entry_block() + if let ir::ValueDef::Param(def_block, def_num) = self.func.dfg.value_def(arg) { + if Some(def_block) == self.func.layout.entry_block() && self.func.signature.params[def_num].location.is_stack() { debug!("-> isolating function stack parameter {}", arg); - let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg); + let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg); self.virtregs.union(param, new_arg); continue; } } // Check for basic interference: If `arg` overlaps a value defined at the entry to - // `ebb`, it can never be used as an EBB argument. + // `block`, it can never be used as an block argument. let interference = { let lr = &self.liveness[arg]; - // There are two ways the argument value can interfere with `ebb`: + // There are two ways the argument value can interfere with `block`: // - // 1. It is defined in a dominating EBB and live-in to `ebb`. - // 2. If is itself a parameter value for `ebb`. This case should already have been + // 1. It is defined in a dominating block and live-in to `block`. + // 2. If is itself a parameter value for `block`. This case should already have been // eliminated by `isolate_conflicting_params()`. debug_assert!( - lr.def() != ebb.into(), + lr.def() != block.into(), "{} parameter {} was missed by isolate_conflicting_params()", - ebb, + block, arg ); - // The only other possibility is that `arg` is live-in to `ebb`. - lr.is_livein(ebb, &self.func.layout) + // The only other possibility is that `arg` is live-in to `block`. 
+ lr.is_livein(block, &self.func.layout) }; if interference { - let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg); + let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg); self.virtregs.union(param, new_arg); } else { self.virtregs.union(param, arg); @@ -262,31 +262,31 @@ impl<'a> Context<'a> { } } - // Isolate EBB parameter value `param` on `ebb`. + // Isolate block parameter value `param` on `block`. // // When `param=v10`: // - // ebb1(v10: i32): + // block1(v10: i32): // foo // // becomes: // - // ebb1(v11: i32): + // block1(v11: i32): // v10 = copy v11 // foo // // This function inserts the copy and updates the live ranges of the old and new parameter // values. Returns the new parameter value. - fn isolate_param(&mut self, ebb: Ebb, param: Value) -> Value { + fn isolate_param(&mut self, block: Block, param: Value) -> Value { debug_assert_eq!( self.func.dfg.value_def(param).pp(), - ExpandedProgramPoint::Ebb(ebb) + ExpandedProgramPoint::Block(block) ); let ty = self.func.dfg.value_type(param); - let new_val = self.func.dfg.replace_ebb_param(param, ty); + let new_val = self.func.dfg.replace_block_param(param, ty); - // Insert a copy instruction at the top of `ebb`. - let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(ebb); + // Insert a copy instruction at the top of `block`. + let mut pos = EncCursor::new(self.func, self.isa).at_first_inst(block); if let Some(inst) = pos.current_inst() { pos.use_srcloc(inst); } @@ -297,7 +297,7 @@ impl<'a> Context<'a> { debug!( "-> inserted {}, following {}({}: {})", pos.display_inst(inst), - ebb, + block, new_val, ty ); @@ -311,27 +311,27 @@ impl<'a> Context<'a> { .expect("Bad copy encoding") .outs[0], ); - self.liveness.create_dead(new_val, ebb, affinity); + self.liveness.create_dead(new_val, block, affinity); self.liveness - .extend_locally(new_val, ebb, inst, &pos.func.layout); + .extend_locally(new_val, block, inst, &pos.func.layout); new_val } - // Isolate the EBB argument `pred_val` from the predecessor `(pred_ebb, pred_inst)`. + // Isolate the block argument `pred_val` from the predecessor `(pred_block, pred_inst)`. // - // It is assumed that `pred_inst` is a branch instruction in `pred_ebb` whose `argnum`'th EBB - // argument is `pred_val`. Since the argument value interferes with the corresponding EBB + // It is assumed that `pred_inst` is a branch instruction in `pred_block` whose `argnum`'th block + // argument is `pred_val`. Since the argument value interferes with the corresponding block // parameter at the destination, a copy is used instead: // - // brnz v1, ebb2(v10) + // brnz v1, block2(v10) // // Becomes: // // v11 = copy v10 - // brnz v1, ebb2(v11) + // brnz v1, block2(v11) // - // This way the interference with the EBB parameter is avoided. + // This way the interference with the block parameter is avoided. // // A live range for the new value is created while the live range for `pred_val` is left // unaltered. @@ -339,7 +339,7 @@ impl<'a> Context<'a> { // The new argument value is returned. 
fn isolate_arg( &mut self, - pred_ebb: Ebb, + pred_block: Block, pred_inst: Inst, argnum: usize, pred_val: Value, @@ -360,14 +360,14 @@ impl<'a> Context<'a> { ); self.liveness.create_dead(copy, inst, affinity); self.liveness - .extend_locally(copy, pred_ebb, pred_inst, &pos.func.layout); + .extend_locally(copy, pred_block, pred_inst, &pos.func.layout); pos.func.dfg.inst_variable_args_mut(pred_inst)[argnum] = copy; debug!( "-> inserted {}, before {}: {}", pos.display_inst(inst), - pred_ebb, + pred_block, pos.display_inst(pred_inst) ); @@ -377,7 +377,7 @@ impl<'a> Context<'a> { /// Finish the union-find part of the coalescing algorithm. /// /// This builds the initial set of virtual registers as the transitive/reflexive/symmetric - /// closure of the relation formed by EBB parameter-argument pairs found by `union_find_ebb()`. + /// closure of the relation formed by block parameter-argument pairs found by `union_find_block()`. fn finish_union_find(&mut self) { self.virtregs.finish_union_find(None); debug!("After union-find phase:{}", self.virtregs); @@ -430,7 +430,7 @@ impl<'a> Context<'a> { // Check for interference between `parent` and `value`. Since `parent` dominates // `value`, we only have to check if it overlaps the definition. - if self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout) { + if self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout) { // The two values are interfering, so they can't be in the same virtual register. debug!("-> interference: {} overlaps def of {}", parent, value); return false; @@ -470,9 +470,9 @@ impl<'a> Context<'a> { } } - /// Merge EBB parameter value `param` with virtual registers at its predecessors. + /// Merge block parameter value `param` with virtual registers at its predecessors. fn merge_param(&mut self, param: Value) { - let (ebb, argnum) = match self.func.dfg.value_def(param) { + let (block, argnum) = match self.func.dfg.value_def(param) { ir::ValueDef::Param(e, n) => (e, n), ir::ValueDef::Result(_, _) => panic!("Expected parameter"), }; @@ -493,12 +493,12 @@ impl<'a> Context<'a> { // not loop backedges. debug_assert!(self.predecessors.is_empty()); debug_assert!(self.backedges.is_empty()); - for BasicBlock { - ebb: pred_ebb, + for BlockPredecessor { + block: pred_block, inst: pred_inst, - } in self.cfg.pred_iter(ebb) + } in self.cfg.pred_iter(block) { - if self.preorder.dominates(ebb, pred_ebb) { + if self.preorder.dominates(block, pred_block) { self.backedges.push(pred_inst); } else { self.predecessors.push(pred_inst); @@ -522,8 +522,8 @@ impl<'a> Context<'a> { } // Can't merge because of interference. Insert a copy instead. - let pred_ebb = self.func.layout.pp_ebb(pred_inst); - let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg); + let pred_block = self.func.layout.pp_block(pred_inst); + let new_arg = self.isolate_arg(pred_block, pred_inst, argnum, arg); self.virtregs .insert_single(param, new_arg, self.func, self.preorder); } @@ -616,12 +616,12 @@ impl<'a> Context<'a> { // Check if the parent value interferes with the virtual copy. 
         let inst = node.def.unwrap_inst();
         if node.set_id != parent.set_id
-            && self.liveness[parent.value].reaches_use(inst, node.ebb, &self.func.layout)
+            && self.liveness[parent.value].reaches_use(inst, node.block, &self.func.layout)
         {
             debug!(
                 " - interference: {} overlaps vcopy at {}:{}",
                 parent,
-                node.ebb,
+                node.block,
                 self.func.dfg.display_inst(inst, self.isa)
             );
             return false;
@@ -640,7 +640,7 @@ impl<'a> Context<'a> {
         // Both node and parent are values, so check for interference.
         debug_assert!(node.is_value() && parent.is_value());
         if node.set_id != parent.set_id
-            && self.liveness[parent.value].overlaps_def(node.def, node.ebb, &self.func.layout)
+            && self.liveness[parent.value].overlaps_def(node.def, node.block, &self.func.layout)
         {
             // The two values are interfering.
             debug!(" - interference: {} overlaps def of {}", parent, node.value);
@@ -663,7 +663,7 @@ impl<'a> Context<'a> {
 ///
 /// The idea of a dominator forest was introduced on the Budimlic paper and the linear stack
 /// representation in the Boissinot paper. Our version of the linear stack is slightly modified
-/// because we have a pre-order of the dominator tree at the EBB granularity, not basic block
+/// because we have a pre-order of the dominator tree at the block granularity, not basic block
 /// granularity.
 ///
 /// Values are pushed in dominator tree pre-order of their definitions, and for each value pushed,
@@ -673,7 +673,7 @@ struct DomForest {
     // Stack representing the rightmost edge of the dominator forest so far, ending in the last
     // element of `values`.
     //
-    // At all times, the EBB of each element in the stack dominates the EBB of the next one.
+    // At all times, the block of each element in the stack dominates the block of the next one.
     stack: Vec<Node>,
 }
@@ -683,8 +683,8 @@ struct DomForest {
 struct Node {
     /// The program point where the live range is defined.
     def: ExpandedProgramPoint,
-    /// EBB containing `def`.
-    ebb: Ebb,
+    /// Block containing `def`.
+    block: Block,
     /// Is this a virtual copy or a value?
     is_vcopy: bool,
     /// Set identifier.
@@ -698,10 +698,10 @@ impl Node {
     /// Create a node representing `value`.
     pub fn value(value: Value, set_id: u8, func: &Function) -> Self {
         let def = func.dfg.value_def(value).pp();
-        let ebb = func.layout.pp_ebb(def);
+        let block = func.layout.pp_block(def);
         Self {
             def,
-            ebb,
+            block,
             is_vcopy: false,
             set_id,
             value,
@@ -711,10 +711,10 @@ impl Node {
     /// Create a node representing a virtual copy.
     pub fn vcopy(branch: Inst, value: Value, set_id: u8, func: &Function) -> Self {
         let def = branch.into();
-        let ebb = func.layout.pp_ebb(def);
+        let block = func.layout.pp_block(def);
         Self {
             def,
-            ebb,
+            block,
             is_vcopy: true,
             set_id,
             value,
@@ -730,9 +730,9 @@ impl Node {
 impl fmt::Display for Node {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         if self.is_vcopy {
-            write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.ebb)
+            write!(f, "{}:vcopy({})@{}", self.set_id, self.value, self.block)
         } else {
-            write!(f, "{}:{}@{}", self.set_id, self.value, self.ebb)
+            write!(f, "{}:{}@{}", self.set_id, self.value, self.block)
         }
     }
 }
@@ -760,16 +760,16 @@ impl DomForest {
         preorder: &DominatorTreePreorder,
     ) -> Option<Node> {
         // The stack contains the current sequence of dominating defs. Pop elements until we
-        // find one whose EBB dominates `node.ebb`.
+        // find one whose block dominates `node.block`.
         while let Some(top) = self.stack.pop() {
-            if preorder.dominates(top.ebb, node.ebb) {
+            if preorder.dominates(top.block, node.block) {
                 // This is the right insertion spot for `node`.
self.stack.push(top); self.stack.push(node); - // We know here that `top.ebb` dominates `node.ebb`, and thus `node.def`. This does + // We know here that `top.block` dominates `node.block`, and thus `node.def`. This does // not necessarily mean that `top.def` dominates `node.def`, though. The `top.def` - // program point may be below the last branch in `top.ebb` that dominates + // program point may be below the last branch in `top.block` that dominates // `node.def`. // // We do know, though, that if there is a nearest value dominating `node.def`, it @@ -777,16 +777,16 @@ impl DomForest { // dominates. let mut last_dom = node.def; for &n in self.stack.iter().rev().skip(1) { - // If the node is defined at the EBB header, it does in fact dominate + // If the node is defined at the block header, it does in fact dominate // everything else pushed on the stack. let def_inst = match n.def { - ExpandedProgramPoint::Ebb(_) => return Some(n), + ExpandedProgramPoint::Block(_) => return Some(n), ExpandedProgramPoint::Inst(i) => i, }; - // We need to find the last program point in `n.ebb` to dominate `node.def`. - last_dom = match domtree.last_dominator(n.ebb, last_dom, &func.layout) { - None => n.ebb.into(), + // We need to find the last program point in `n.block` to dominate `node.def`. + last_dom = match domtree.last_dominator(n.block, last_dom, &func.layout) { + None => n.block.into(), Some(inst) => { if func.layout.cmp(def_inst, inst) != cmp::Ordering::Greater { return Some(n); @@ -816,18 +816,18 @@ impl DomForest { /// When building a full virtual register at once, like phase 1 does with union-find, it is good /// enough to check for interference between the values in the full virtual register like /// `check_vreg()` does. However, in phase 2 we are doing pairwise merges of partial virtual -/// registers that don't represent the full transitive closure of the EBB argument-parameter +/// registers that don't represent the full transitive closure of the block argument-parameter /// relation. This means that just checking for interference between values is inadequate. /// /// Example: /// /// v1 = iconst.i32 1 -/// brnz v10, ebb1(v1) +/// brnz v10, block1(v1) /// v2 = iconst.i32 2 -/// brnz v11, ebb1(v2) +/// brnz v11, block1(v2) /// return v1 /// -/// ebb1(v3: i32): +/// block1(v3: i32): /// v4 = iadd v3, v1 /// /// With just value interference checking, we could build the virtual register [v3, v1] since those @@ -835,13 +835,13 @@ impl DomForest { /// interfere. However, we can't resolve that interference either by inserting a copy: /// /// v1 = iconst.i32 1 -/// brnz v10, ebb1(v1) +/// brnz v10, block1(v1) /// v2 = iconst.i32 2 /// v20 = copy v2 <-- new value -/// brnz v11, ebb1(v20) +/// brnz v11, block1(v20) /// return v1 /// -/// ebb1(v3: i32): +/// block1(v3: i32): /// v4 = iadd v3, v1 /// /// The new value v20 still interferes with v1 because v1 is live across the "brnz v11" branch. We @@ -851,32 +851,32 @@ impl DomForest { /// instructions, then attempting to delete the copies. This is quite expensive because it involves /// creating a large number of copies and value. /// -/// We'll detect this form of interference with *virtual copies*: Each EBB parameter value that -/// hasn't yet been fully merged with its EBB argument values is given a set of virtual copies at +/// We'll detect this form of interference with *virtual copies*: Each block parameter value that +/// hasn't yet been fully merged with its block argument values is given a set of virtual copies at /// the predecessors. 
Any candidate value to be merged is checked for interference against both the
/// virtual register and the virtual copies.
///
/// In the general case, we're checking if two virtual registers can be merged, and both can
-/// contain incomplete EBB parameter values with associated virtual copies.
+/// contain incomplete block parameter values with associated virtual copies.
///
/// The `VirtualCopies` struct represents a set of incomplete parameters and their associated
/// virtual copies. Given two virtual registers, it can produce an ordered sequence of nodes
/// representing the virtual copies in both vregs.
struct VirtualCopies {
-    // Incomplete EBB parameters. These don't need to belong to the same virtual register.
+    // Incomplete block parameters. These don't need to belong to the same virtual register.
     params: Vec<Value>,

-    // Set of `(branch, destination)` pairs. These are all the predecessor branches for the EBBs
+    // Set of `(branch, destination)` pairs. These are all the predecessor branches for the blocks
     // whose parameters can be found in `params`.
     //
     // Ordered by dominator tree pre-order of the branch instructions.
-    branches: Vec<(Inst, Ebb)>,
+    branches: Vec<(Inst, Block)>,

     // Filter for the currently active node iterator.
     //
-    // An ebb => (set_id, num) entry means that branches to `ebb` are active in `set_id` with
+    // A block => (set_id, num) entry means that branches to `block` are active in `set_id` with
     // branch argument number `num`.
-    filter: FxHashMap<Ebb, (u8, usize)>,
+    filter: FxHashMap<Block, (u8, usize)>,
 }

 impl VirtualCopies {
@@ -901,7 +901,7 @@ impl VirtualCopies {
     ///
     /// The values are assumed to be in domtree pre-order.
     ///
-    /// This will extract the EBB parameter values and associate virtual copies all of them.
+    /// This will extract the block parameter values and associate virtual copies with all of them.
     pub fn initialize(
         &mut self,
         values: &[Value],
@@ -911,29 +911,29 @@ impl VirtualCopies {
     ) {
         self.clear();

-        let mut last_ebb = None;
+        let mut last_block = None;
         for &val in values {
-            if let ir::ValueDef::Param(ebb, _) = func.dfg.value_def(val) {
+            if let ir::ValueDef::Param(block, _) = func.dfg.value_def(val) {
                 self.params.push(val);

-                // We may have multiple parameters from the same EBB, but we only need to collect
+                // We may have multiple parameters from the same block, but we only need to collect
                 // predecessors once. Also verify the ordering of values.
-                if let Some(last) = last_ebb {
-                    match preorder.pre_cmp_ebb(last, ebb) {
+                if let Some(last) = last_block {
+                    match preorder.pre_cmp_block(last, block) {
                         cmp::Ordering::Less => {}
                         cmp::Ordering::Equal => continue,
                         cmp::Ordering::Greater => panic!("values in wrong order"),
                     }
                 }

-                // This EBB hasn't been seen before.
-                for BasicBlock {
+                // This block hasn't been seen before.
+                for BlockPredecessor {
                     inst: pred_inst, ..
-                } in cfg.pred_iter(ebb)
+                } in cfg.pred_iter(block)
                 {
-                    self.branches.push((pred_inst, ebb));
+                    self.branches.push((pred_inst, block));
                 }
-                last_ebb = Some(ebb);
+                last_block = Some(block);
             }
         }

@@ -953,7 +953,7 @@ impl VirtualCopies {
         debug_assert_eq!(popped, Some(param));

         // The domtree pre-order in `self.params` guarantees that all parameters defined at the
-        // same EBB will be adjacent. This means we can see when all parameters at an EBB have been
+        // same block will be adjacent. This means we can see when all parameters at a block have been
         // merged.
         //
         // We don't care about the last parameter - when that is merged we are done.
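
For background on the union-find structure that phase 1 relies on, here is a minimal standalone sketch. This is a toy with hypothetical names, not Cranelift's actual `VirtRegs` (which also tracks affinities and performs the interference checks above); it only illustrates how parameter/argument unions produce congruence classes.

// Toy union-find with path halving. Each value starts in its own class;
// unioning a block parameter with every predecessor's argument builds the
// congruence classes that become virtual registers.
struct UnionFind {
    parent: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        Self { parent: (0..n).collect() }
    }

    // Find the class representative, halving paths along the way.
    fn find(&mut self, mut v: usize) -> usize {
        while self.parent[v] != v {
            self.parent[v] = self.parent[self.parent[v]];
            v = self.parent[v];
        }
        v
    }

    // Merge the classes of a parameter and one of its branch arguments.
    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra != rb {
            self.parent[ra] = rb;
        }
    }
}

fn main() {
    // v3 is a block parameter fed by v1 and v2 from two predecessor branches;
    // after the unions, all three values share one congruence class.
    let mut uf = UnionFind::new(4);
    uf.union(1, 3);
    uf.union(2, 3);
    assert_eq!(uf.find(1), uf.find(2));
}
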
@@ -961,16 +961,16 @@ impl VirtualCopies { None => return, Some(x) => *x, }; - let ebb = func.dfg.value_def(param).unwrap_ebb(); - if func.dfg.value_def(last).unwrap_ebb() == ebb { - // We're not done with `ebb` parameters yet. + let block = func.dfg.value_def(param).unwrap_block(); + if func.dfg.value_def(last).unwrap_block() == block { + // We're not done with `block` parameters yet. return; } - // Alright, we know there are no remaining `ebb` parameters in `self.params`. This means we - // can get rid of the `ebb` predecessors in `self.branches`. We don't have to, the + // Alright, we know there are no remaining `block` parameters in `self.params`. This means we + // can get rid of the `block` predecessors in `self.branches`. We don't have to, the // `VCopyIter` will just skip them, but this reduces its workload. - self.branches.retain(|&(_, dest)| dest != ebb); + self.branches.retain(|&(_, dest)| dest != block); } /// Set a filter for the virtual copy nodes we're generating. @@ -991,28 +991,28 @@ impl VirtualCopies { // removed from the back once they are fully merged. This means we can stop looking for // parameters once we're beyond the last one. let last_param = *self.params.last().expect("No more parameters"); - let limit = func.dfg.value_def(last_param).unwrap_ebb(); + let limit = func.dfg.value_def(last_param).unwrap_block(); for (set_id, repr) in reprs.iter().enumerate() { let set_id = set_id as u8; for &value in virtregs.congruence_class(repr) { - if let ir::ValueDef::Param(ebb, num) = func.dfg.value_def(value) { - if preorder.pre_cmp_ebb(ebb, limit) == cmp::Ordering::Greater { + if let ir::ValueDef::Param(block, num) = func.dfg.value_def(value) { + if preorder.pre_cmp_block(block, limit) == cmp::Ordering::Greater { // Stop once we're outside the bounds of `self.params`. break; } - self.filter.insert(ebb, (set_id, num)); + self.filter.insert(block, (set_id, num)); } } } } - /// Look up the set_id and argument number for `ebb` in the current filter. + /// Look up the set_id and argument number for `block` in the current filter. /// - /// Returns `None` if none of the currently active parameters are defined at `ebb`. Otherwise - /// returns `(set_id, argnum)` for an active parameter defined at `ebb`. - fn lookup(&self, ebb: Ebb) -> Option<(u8, usize)> { - self.filter.get(&ebb).cloned() + /// Returns `None` if none of the currently active parameters are defined at `block`. Otherwise + /// returns `(set_id, argnum)` for an active parameter defined at `block`. + fn lookup(&self, block: Block) -> Option<(u8, usize)> { + self.filter.get(&block).cloned() } /// Get an iterator of dom-forest nodes corresponding to the current filter. @@ -1032,7 +1032,7 @@ impl VirtualCopies { struct VCopyIter<'a> { func: &'a Function, vcopies: &'a VirtualCopies, - branches: slice::Iter<'a, (Inst, Ebb)>, + branches: slice::Iter<'a, (Inst, Block)>, } impl<'a> Iterator for VCopyIter<'a> { @@ -1090,7 +1090,7 @@ where (Some(a), Some(b)) => { let layout = self.layout; self.preorder - .pre_cmp_ebb(a.ebb, b.ebb) + .pre_cmp_block(a.block, b.block) .then_with(|| layout.cmp(a.def, b.def)) } (Some(_), None) => cmp::Ordering::Less, diff --git a/cranelift/codegen/src/regalloc/coloring.rs b/cranelift/codegen/src/regalloc/coloring.rs index 347aec9ade..eb3cb513c8 100644 --- a/cranelift/codegen/src/regalloc/coloring.rs +++ b/cranelift/codegen/src/regalloc/coloring.rs @@ -24,8 +24,8 @@ //! a register. //! //! 5. The code must be in Conventional SSA form. Among other things, this means that values passed -//! 
as arguments when branching to an EBB must belong to the same virtual register as the
-//! corresponding EBB argument value.
+//! as arguments when branching to a block must belong to the same virtual register as the
+//! corresponding block argument value.
 //!
 //! # Iteration order
 //!
@@ -35,10 +35,10 @@
 //! defined by the instruction and only consider the colors of other values that are live at the
 //! instruction.
 //!
-//! The first time we see a branch to an EBB, the EBB's argument values are colored to match the
+//! The first time we see a branch to a block, the block's argument values are colored to match the
 //! registers currently holding branch argument values passed to the predecessor branch. By
-//! visiting EBBs in a CFG topological order, we guarantee that at least one predecessor branch has
-//! been visited before the destination EBB. Therefore, the EBB's arguments are already colored.
+//! visiting blocks in a CFG topological order, we guarantee that at least one predecessor branch has
+//! been visited before the destination block. Therefore, the block's arguments are already colored.
 //!
 //! The exception is the entry block whose arguments are colored from the ABI requirements.

@@ -46,7 +46,7 @@ use crate::cursor::{Cursor, EncCursor};
 use crate::dominator_tree::DominatorTree;
 use crate::flowgraph::ControlFlowGraph;
 use crate::ir::{ArgumentLoc, InstBuilder, ValueDef};
-use crate::ir::{Ebb, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
+use crate::ir::{Block, Function, Inst, InstructionData, Layout, Opcode, SigRef, Value, ValueLoc};
 use crate::isa::{regs_overlap, RegClass, RegInfo, RegUnit};
 use crate::isa::{ConstraintKind, EncInfo, OperandConstraint, RecipeConstraints, TargetIsa};
 use crate::packed_option::PackedOption;
@@ -168,20 +168,20 @@ impl<'a> Context<'a> {
             .resize(self.cur.func.dfg.num_values());

         // Visit blocks in reverse post-order. We need to ensure that at least one predecessor has
-        // been visited before each EBB. That guarantees that the EBB arguments have been colored.
-        for &ebb in self.domtree.cfg_postorder().iter().rev() {
-            self.visit_ebb(ebb, tracker);
+        // been visited before each block. That guarantees that the block arguments have been colored.
+        for &block in self.domtree.cfg_postorder().iter().rev() {
+            self.visit_block(block, tracker);
         }
     }

-    /// Visit `ebb`, assuming that the immediate dominator has already been visited.
-    fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
-        debug!("Coloring {}:", ebb);
-        let mut regs = self.visit_ebb_header(ebb, tracker);
+    /// Visit `block`, assuming that the immediate dominator has already been visited.
+    fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) {
+        debug!("Coloring {}:", block);
+        let mut regs = self.visit_block_header(block, tracker);
         tracker.drop_dead_params();

-        // Now go through the instructions in `ebb` and color the values they define.
-        self.cur.goto_top(ebb);
+        // Now go through the instructions in `block` and color the values they define.
+        self.cur.goto_top(block);
         while let Some(inst) = self.cur.next_inst() {
             self.cur.use_srcloc(inst);
             let opcode = self.cur.func.dfg[inst].opcode();
@@ -204,7 +204,7 @@ impl<'a> Context<'a> {
             tracker.drop_dead(inst);

             // We are not able to insert any regmove for diversion or un-diversion after the first
-            // branch. Instead, we record the diversion to be restored at the entry of the next EBB,
+            // branch. Instead, we record the diversion to be restored at the entry of the next block,
             // which should have a single predecessor.
             if opcode.is_branch() {
                 // The next instruction is necessarily an unconditional branch.
@@ -221,15 +221,15 @@ impl<'a> Context<'a> {
                             "unexpected instruction {} after a conditional branch",
                             self.cur.display_inst(branch)
                         ),
-                        SingleDest(ebb, _) => ebb,
+                        SingleDest(block, _) => block,
                     };

-                    // We have a single branch with a single target, and an EBB with a single
-                    // predecessor. Thus we can forward the diversion set to the next EBB.
+                    // We have a single branch with a single target, and a block with a single
+                    // predecessor. Thus we can forward the diversion set to the next block.
                     if self.cfg.pred_iter(target).count() == 1 {
-                        // Transfer the diversion to the next EBB.
+                        // Transfer the diversion to the next block.
                         self.divert
-                            .save_for_ebb(&mut self.cur.func.entry_diversions, target);
+                            .save_for_block(&mut self.cur.func.entry_diversions, target);
                         debug!(
                             "Set entry-diversion for {} to\n {}",
                             target,
@@ -253,13 +253,17 @@ impl<'a> Context<'a> {
         }
     }

-    /// Visit the `ebb` header.
+    /// Visit the `block` header.
     ///
-    /// Initialize the set of live registers and color the arguments to `ebb`.
-    fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) -> AvailableRegs {
-        // Reposition the live value tracker and deal with the EBB arguments.
-        tracker.ebb_top(
-            ebb,
+    /// Initialize the set of live registers and color the arguments to `block`.
+    fn visit_block_header(
+        &mut self,
+        block: Block,
+        tracker: &mut LiveValueTracker,
+    ) -> AvailableRegs {
+        // Reposition the live value tracker and deal with the block arguments.
+        tracker.block_top(
+            block,
             &self.cur.func.dfg,
             self.liveness,
             &self.cur.func.layout,
@@ -268,18 +272,18 @@ impl<'a> Context<'a> {

         // Copy the content of the registered diversions to be reused at the
         // entry of this basic block.
-        self.divert.at_ebb(&self.cur.func.entry_diversions, ebb);
+        self.divert.at_block(&self.cur.func.entry_diversions, block);
         debug!(
             "Start {} with entry-diversion set to\n {}",
-            ebb,
+            block,
             self.divert.display(&self.reginfo)
         );

-        if self.cur.func.layout.entry_block() == Some(ebb) {
+        if self.cur.func.layout.entry_block() == Some(block) {
             // Parameters on the entry block have ABI constraints.
             self.color_entry_params(tracker.live())
         } else {
-            // The live-ins and parameters of a non-entry EBB have already been assigned a register.
+            // The live-ins and parameters of a non-entry block have already been assigned a register.
             // Reconstruct the allocatable set.
             self.livein_regs(tracker.live())
         }
@@ -288,7 +292,7 @@ impl<'a> Context<'a> {
     /// Initialize a set of allocatable registers from the values that are live-in to a block.
     /// These values must already be colored when the dominating blocks were processed.
     ///
-    /// Also process the EBB arguments which were colored when the first predecessor branch was
+    /// Also process the block arguments which were colored when the first predecessor branch was
     /// encountered.
     fn livein_regs(&self, live: &[LiveValue]) -> AvailableRegs {
         // Start from the registers that are actually usable. We don't want to include any reserved
@@ -428,7 +432,7 @@ impl<'a> Context<'a> {
             regs.input.display(&self.reginfo),
         );

-        // EBB whose arguments should be colored to match the current branch instruction's
+        // Block whose arguments should be colored to match the current branch instruction's
         // arguments.
let mut color_dest_args = None; @@ -446,10 +450,10 @@ impl<'a> Context<'a> { self.program_input_abi(inst, AbiParams::Returns); } else if self.cur.func.dfg[inst].opcode().is_branch() { // This is a branch, so we need to make sure that globally live values are in their - // global registers. For EBBs that take arguments, we also need to place the argument + // global registers. For blocks that take arguments, we also need to place the argument // values in the expected registers. if let Some(dest) = self.cur.func.dfg[inst].branch_destination() { - if self.program_ebb_arguments(inst, dest) { + if self.program_block_arguments(inst, dest) { color_dest_args = Some(dest); } } else { @@ -458,7 +462,7 @@ impl<'a> Context<'a> { debug_assert_eq!( self.cur.func.dfg.inst_variable_args(inst).len(), 0, - "Can't handle EBB arguments: {}", + "Can't handle block arguments: {}", self.cur.display_inst(inst) ); self.undivert_regs(|lr, _| !lr.is_local()); @@ -576,7 +580,7 @@ impl<'a> Context<'a> { // If this is the first time we branch to `dest`, color its arguments to match the current // register state. if let Some(dest) = color_dest_args { - self.color_ebb_params(inst, dest); + self.color_block_params(inst, dest); } // Apply the solution to the defs. @@ -727,7 +731,7 @@ impl<'a> Context<'a> { // This code runs after calling `solver.inputs_done()` so we must identify // the new variable as killed or live-through. let layout = &self.cur.func.layout; - if self.liveness[arg_val].killed_at(inst, layout.pp_ebb(inst), layout) { + if self.liveness[arg_val].killed_at(inst, layout.pp_block(inst), layout) { self.solver .add_killed_var(arg_val, constraint.regclass, cur_reg); } else { @@ -747,12 +751,12 @@ impl<'a> Context<'a> { /// /// 1. Any values that are live-in to `dest` must be un-diverted so they live in their globally /// assigned register. - /// 2. If the `dest` EBB takes arguments, reassign the branch argument values to the matching + /// 2. If the `dest` block takes arguments, reassign the branch argument values to the matching /// registers. /// /// Returns true if this is the first time a branch to `dest` is seen, so the `dest` argument /// values should be colored after `shuffle_inputs`. - fn program_ebb_arguments(&mut self, inst: Inst, dest: Ebb) -> bool { + fn program_block_arguments(&mut self, inst: Inst, dest: Block) -> bool { // Find diverted registers that are live-in to `dest` and reassign them to their global // home. // @@ -760,9 +764,9 @@ impl<'a> Context<'a> { // arguments, so they can't always be un-diverted. self.undivert_regs(|lr, layout| lr.is_livein(dest, layout)); - // Now handle the EBB arguments. + // Now handle the block arguments. let br_args = self.cur.func.dfg.inst_variable_args(inst); - let dest_args = self.cur.func.dfg.ebb_params(dest); + let dest_args = self.cur.func.dfg.block_params(dest); debug_assert_eq!(br_args.len(), dest_args.len()); for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) { // The first time we encounter a branch to `dest`, we get to pick the location. The @@ -771,7 +775,7 @@ impl<'a> Context<'a> { ValueLoc::Unassigned => { // This is the first branch to `dest`, so we should color `dest_arg` instead of // `br_arg`. However, we don't know where `br_arg` will end up until - // after `shuffle_inputs`. See `color_ebb_params` below. + // after `shuffle_inputs`. See `color_block_params` below. // // It is possible for `dest_arg` to have no affinity, and then it should simply // be ignored. 
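
The first-seen-branch rule implemented by `program_block_arguments()` and `color_block_params()` can be pictured with a small self-contained sketch. The `Value`/`RegUnit` aliases and the map below are hypothetical stand-ins, not the pass's real data structures: the first branch to a block fixes each parameter's register, and later branches must shuffle their arguments to match.

use std::collections::hash_map::{Entry, HashMap};

// Hypothetical stand-ins for the real entity types.
type Value = u32;
type RegUnit = u16;

#[derive(Default)]
struct ParamColors {
    assigned: HashMap<Value, RegUnit>,
}

impl ParamColors {
    // Returns true if this call colored `param`, i.e. this was the first
    // branch seen that targets the block defining `param`. Later branches
    // get `false` and must move their argument into the recorded register.
    fn color_from_branch(&mut self, param: Value, arg_reg: RegUnit) -> bool {
        match self.assigned.entry(param) {
            Entry::Vacant(e) => {
                e.insert(arg_reg);
                true
            }
            Entry::Occupied(_) => false,
        }
    }
}

fn main() {
    let mut colors = ParamColors::default();
    assert!(colors.color_from_branch(10, 3)); // first branch picks reg 3
    assert!(!colors.color_from_branch(10, 7)); // later branch must shuffle 7 -> 3
}
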
@@ -804,10 +808,10 @@ impl<'a> Context<'a> {
     /// Knowing that we've never seen a branch to `dest` before, color its parameters to match our
     /// register state.
     ///
-    /// This function is only called when `program_ebb_arguments()` returned `true`.
-    fn color_ebb_params(&mut self, inst: Inst, dest: Ebb) {
+    /// This function is only called when `program_block_arguments()` returned `true`.
+    fn color_block_params(&mut self, inst: Inst, dest: Block) {
         let br_args = self.cur.func.dfg.inst_variable_args(inst);
-        let dest_args = self.cur.func.dfg.ebb_params(dest);
+        let dest_args = self.cur.func.dfg.block_params(dest);
         debug_assert_eq!(br_args.len(), dest_args.len());
         for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
             match self.cur.func.locations[dest_arg] {
@@ -818,7 +822,7 @@ impl<'a> Context<'a> {
                 }
                 ValueLoc::Reg(_) => panic!("{} arg {} already colored", dest, dest_arg),
-                // Spilled value consistency is verified by `program_ebb_arguments()` above.
+                // Spilled value consistency is verified by `program_block_arguments()` above.
                 ValueLoc::Stack(_) => {}
             }
         }
@@ -1082,7 +1086,7 @@ impl<'a> Context<'a> {
     /// Determine if `value` is live on a CFG edge from the current instruction.
     ///
     /// This means that the current instruction is a branch and `value` is live in to one of the
-    /// branch destinations. Branch arguments and EBB parameters are not considered live on the
+    /// branch destinations. Branch arguments and block parameters are not considered live on the
     /// edge.
     fn is_live_on_outgoing_edge(&self, value: Value) -> bool {
         use crate::ir::instructions::BranchInfo::*;
@@ -1091,17 +1095,17 @@ impl<'a> Context<'a> {
         let layout = &self.cur.func.layout;
         match self.cur.func.dfg.analyze_branch(inst) {
             NotABranch => false,
-            SingleDest(ebb, _) => {
+            SingleDest(block, _) => {
                 let lr = &self.liveness[value];
-                lr.is_livein(ebb, layout)
+                lr.is_livein(block, layout)
             }
-            Table(jt, ebb) => {
+            Table(jt, block) => {
                 let lr = &self.liveness[value];
                 !lr.is_local()
-                    && (ebb.map_or(false, |ebb| lr.is_livein(ebb, layout))
+                    && (block.map_or(false, |block| lr.is_livein(block, layout))
                         || self.cur.func.jump_tables[jt]
                             .iter()
-                            .any(|ebb| lr.is_livein(*ebb, layout)))
+                            .any(|block| lr.is_livein(*block, layout)))
             }
         }
     }
@@ -1232,7 +1236,7 @@ impl<'a> Context<'a> {
             self.liveness.create_dead(local, inst, lv.affinity);
             self.liveness.extend_locally(
                 local,
-                self.cur.func.layout.pp_ebb(inst),
+                self.cur.func.layout.pp_block(inst),
                 copy,
                 &self.cur.func.layout,
             );
diff --git a/cranelift/codegen/src/regalloc/diversion.rs b/cranelift/codegen/src/regalloc/diversion.rs
index feb9c6f0ef..e3bacbae72 100644
--- a/cranelift/codegen/src/regalloc/diversion.rs
+++ b/cranelift/codegen/src/regalloc/diversion.rs
@@ -4,12 +4,12 @@
 //! Sometimes, it is necessary to move register values to a different register in order to satisfy
 //! instruction constraints.
 //!
-//! These register diversions are local to an EBB. No values can be diverted when entering a new
-//! EBB.
+//! These register diversions are local to a block. No values can be diverted when entering a new
+//! block.

 use crate::fx::FxHashMap;
 use crate::hash_map::{Entry, Iter};
-use crate::ir::{Ebb, StackSlot, Value, ValueLoc, ValueLocations};
+use crate::ir::{Block, StackSlot, Value, ValueLoc, ValueLocations};
 use crate::ir::{InstructionData, Opcode};
 use crate::isa::{RegInfo, RegUnit};
 use core::fmt;
@@ -38,22 +38,22 @@ impl Diversion {
     }
 }

-/// Keep track of diversions in an EBB.
+/// Keep track of diversions in a block.
 #[derive(Clone)]
 pub struct RegDiversions {
     current: FxHashMap<Value, Diversion>,
 }

-/// Keep track of diversions at the entry of EBB.
+/// Keep track of diversions at the entry of a block.
 #[derive(Clone)]
 struct EntryRegDiversionsValue {
-    key: Ebb,
+    key: Block,
     divert: RegDiversions,
 }

-/// Map EBB to their matching RegDiversions at basic blocks entry.
+/// Map blocks to their matching RegDiversions at basic-block entry.
 pub struct EntryRegDiversions {
-    map: SparseMap<Ebb, EntryRegDiversionsValue>,
+    map: SparseMap<Block, EntryRegDiversionsValue>,
 }

 impl RegDiversions {
@@ -178,22 +178,22 @@ impl RegDiversions {
     }

     /// Resets the state of the current diversions to the recorded diversions at the entry of the
-    /// given `ebb`. The recoded diversions is available after coloring on `func.entry_diversions`
+    /// given `block`. The recorded diversions are available after coloring on the `func.entry_diversions`
     /// field.
-    pub fn at_ebb(&mut self, entry_diversions: &EntryRegDiversions, ebb: Ebb) {
+    pub fn at_block(&mut self, entry_diversions: &EntryRegDiversions, block: Block) {
         self.clear();
-        if let Some(entry_divert) = entry_diversions.map.get(ebb) {
+        if let Some(entry_divert) = entry_diversions.map.get(block) {
             let iter = entry_divert.divert.current.iter();
             self.current.extend(iter);
         }
     }

-    /// Copy the current state of the diversions, and save it for the entry of the `ebb` given as
+    /// Copy the current state of the diversions, and save it for the entry of the `block` given as
     /// argument.
     ///
-    /// Note: This function can only be called once on an `ebb` with a given `entry_diversions`
+    /// Note: This function can only be called once on a `Block` with a given `entry_diversions`
     /// argument, otherwise it would panic.
-    pub fn save_for_ebb(&mut self, entry_diversions: &mut EntryRegDiversions, target: Ebb) {
+    pub fn save_for_block(&mut self, entry_diversions: &mut EntryRegDiversions, target: Block) {
         // No need to save anything if there is no diversions to be recorded.
         if self.is_empty() {
             return;
@@ -208,9 +208,9 @@ impl RegDiversions {
         });
     }

-    /// Check that the recorded entry for a given `ebb` matches what is recorded in the
+    /// Check that the recorded entry for a given `block` matches what is recorded in the
     /// `entry_diversions`.
-    pub fn check_ebb_entry(&self, entry_diversions: &EntryRegDiversions, target: Ebb) -> bool {
+    pub fn check_block_entry(&self, entry_diversions: &EntryRegDiversions, target: Block) -> bool {
         let entry_divert = match entry_diversions.map.get(target) {
             Some(entry_divert) => entry_divert,
             None => return self.is_empty(),
@@ -235,7 +235,7 @@ impl RegDiversions {
 }

 impl EntryRegDiversions {
-    /// Create a new empty entry diversion, to associate diversions to each EBB entry.
+    /// Create a new empty entry diversion, to associate diversions to each block entry.
     pub fn new() -> Self {
         Self {
             map: SparseMap::new(),
@@ -259,9 +259,9 @@ impl Clone for EntryRegDiversions {
 }

 /// Implement `SparseMapValue`, as required to make use of a `SparseMap` for mapping the entry
-/// diversions for each EBB.
-impl SparseMapValue<Ebb> for EntryRegDiversionsValue {
-    fn key(&self) -> Ebb {
+/// diversions for each block.
+impl SparseMapValue<Block> for EntryRegDiversionsValue {
+    fn key(&self) -> Block {
         self.key
     }
 }
diff --git a/cranelift/codegen/src/regalloc/live_value_tracker.rs b/cranelift/codegen/src/regalloc/live_value_tracker.rs
index 7faed970a7..f106f4b39d 100644
--- a/cranelift/codegen/src/regalloc/live_value_tracker.rs
+++ b/cranelift/codegen/src/regalloc/live_value_tracker.rs
@@ -1,13 +1,13 @@
-//! Track which values are live in an EBB with instruction granularity.
+//! Track which values are live in a block with instruction granularity.
 //!
-//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in an EBB.
+//! The `LiveValueTracker` keeps track of the set of live SSA values at each instruction in a block.
 //! The sets of live values are computed on the fly as the tracker is moved from instruction to
-//! instruction, starting at the EBB header.
+//! instruction, starting at the block header.

 use crate::dominator_tree::DominatorTree;
 use crate::entity::{EntityList, ListPool};
 use crate::fx::FxHashMap;
-use crate::ir::{DataFlowGraph, Ebb, ExpandedProgramPoint, Inst, Layout, Value};
+use crate::ir::{Block, DataFlowGraph, ExpandedProgramPoint, Inst, Layout, Value};
 use crate::partition_slice::partition_slice;
 use crate::regalloc::affinity::Affinity;
 use crate::regalloc::liveness::Liveness;
@@ -16,13 +16,13 @@ use alloc::vec::Vec;

 type ValueList = EntityList<Value>;

-/// Compute and track live values throughout an EBB.
+/// Compute and track live values throughout a block.
 pub struct LiveValueTracker {
     /// The set of values that are live at the current program point.
     live: LiveValueVec,

     /// Saved set of live values for every jump and branch that can potentially be an immediate
-    /// dominator of an EBB.
+    /// dominator of a block.
     ///
     /// This is the set of values that are live *before* the branch.
     idom_sets: FxHashMap<Inst, ValueList>,
@@ -37,7 +37,7 @@ pub struct LiveValue {
     /// The live value.
     pub value: Value,

-    /// The local ending point of the live range in the current EBB, as returned by
+    /// The local ending point of the live range in the current block, as returned by
     /// `LiveRange::def_local_end()` or `LiveRange::livein_local_end()`.
     pub endpoint: Inst,

@@ -47,7 +47,7 @@ pub struct LiveValue {
     /// almost all users of `LiveValue` need to look at it.
     pub affinity: Affinity,

-    /// The live range for this value never leaves its EBB.
+    /// The live range for this value never leaves its block.
     pub is_local: bool,

     /// This value is dead - the live range ends immediately.
@@ -155,75 +155,75 @@ impl LiveValueTracker {
         &mut self.live.values
     }

-    /// Move the current position to the top of `ebb`.
+    /// Move the current position to the top of `block`.
     ///
-    /// This depends on the stored live value set at `ebb`'s immediate dominator, so that must have
+    /// This depends on the stored live value set at `block`'s immediate dominator, so that must have
     /// been visited first.
     ///
     /// Returns `(liveins, args)` as a pair of slices. The first slice is the set of live-in values
-    /// from the immediate dominator. The second slice is the set of `ebb` parameters.
+    /// from the immediate dominator. The second slice is the set of `block` parameters.
     ///
     /// Dead parameters with no uses are included in `args`. Call `drop_dead_args()` to remove them.
-    pub fn ebb_top(
+    pub fn block_top(
         &mut self,
-        ebb: Ebb,
+        block: Block,
         dfg: &DataFlowGraph,
         liveness: &Liveness,
         layout: &Layout,
         domtree: &DominatorTree,
     ) -> (&[LiveValue], &[LiveValue]) {
-        // Start over, compute the set of live values at the top of the EBB from two sources:
+        // Start over, compute the set of live values at the top of the block from two sources:
         //
-        // 1. Values that were live before `ebb`'s immediate dominator, filtered for those that are
+        // 1. Values that were live before `block`'s immediate dominator, filtered for those that are
         //    actually live-in.
-        // 2. Arguments to `ebb` that are not dead.
+        // 2. Arguments to `block` that are not dead.
         //
         self.live.clear();

         // Compute the live-in values. Start by filtering the set of values that were live before
         // the immediate dominator. Just use the empty set if there's no immediate dominator (i.e.,
         // the entry block or an unreachable block).
-        if let Some(idom) = domtree.idom(ebb) {
+        if let Some(idom) = domtree.idom(block) {
             // If the immediate dominator exits, we must have a stored list for it. This is a
-            // requirement to the order EBBs are visited: All dominators must have been processed
-            // before the current EBB.
+            // requirement on the order in which blocks are visited: all dominators must have been
+            // processed before the current block.
             let idom_live_list = self
                 .idom_sets
                 .get(&idom)
                 .expect("No stored live set for dominator");
-            // Get just the values that are live-in to `ebb`.
+            // Get just the values that are live-in to `block`.
             for &value in idom_live_list.as_slice(&self.idom_pool) {
                 let lr = liveness
                     .get(value)
                     .expect("Immediate dominator value has no live range");

                 // Check if this value is live-in here.
-                if let Some(endpoint) = lr.livein_local_end(ebb, layout) {
+                if let Some(endpoint) = lr.livein_local_end(block, layout) {
                     self.live.push(value, endpoint, lr);
                 }
             }
         }

-        // Now add all the live parameters to `ebb`.
+        // Now add all the live parameters to `block`.
         let first_arg = self.live.values.len();
-        for &value in dfg.ebb_params(ebb) {
+        for &value in dfg.block_params(block) {
             let lr = &liveness[value];
-            debug_assert_eq!(lr.def(), ebb.into());
+            debug_assert_eq!(lr.def(), block.into());
             match lr.def_local_end().into() {
                 ExpandedProgramPoint::Inst(endpoint) => {
                     self.live.push(value, endpoint, lr);
                 }
-                ExpandedProgramPoint::Ebb(local_ebb) => {
-                    // This is a dead EBB parameter which is not even live into the first
-                    // instruction in the EBB.
+                ExpandedProgramPoint::Block(local_block) => {
+                    // This is a dead block parameter which is not even live into the first
+                    // instruction in the block.
                     debug_assert_eq!(
-                        local_ebb, ebb,
-                        "EBB parameter live range ends at wrong EBB header"
+                        local_block, block,
+                        "block parameter live range ends at wrong block header"
                     );
-                    // Give this value a fake endpoint that is the first instruction in the EBB.
+                    // Give this value a fake endpoint that is the first instruction in the block.
                     // We expect it to be removed by calling `drop_dead_args()`.
                     self.live
-                        .push(value, layout.first_inst(ebb).expect("Empty EBB"), lr);
+                        .push(value, layout.first_inst(block).expect("Empty block"), lr);
                 }
             }
         }
@@ -274,8 +274,8 @@ impl LiveValueTracker {
                 ExpandedProgramPoint::Inst(endpoint) => {
                     self.live.push(value, endpoint, lr);
                 }
-                ExpandedProgramPoint::Ebb(ebb) => {
-                    panic!("Instruction result live range can't end at {}", ebb);
+                ExpandedProgramPoint::Block(block) => {
+                    panic!("Instruction result live range can't end at {}", block);
                 }
             }
         }
@@ -310,7 +310,7 @@ impl LiveValueTracker {

     /// Drop any values that are marked as `is_dead`.
     ///
-    /// Use this after calling `ebb_top` to clean out dead EBB parameters.
+    /// Use this after calling `block_top` to clean out dead block parameters.
     pub fn drop_dead_params(&mut self) {
         self.live.remove_dead_values();
     }
diff --git a/cranelift/codegen/src/regalloc/liveness.rs b/cranelift/codegen/src/regalloc/liveness.rs
index f195645809..88c106cce4 100644
--- a/cranelift/codegen/src/regalloc/liveness.rs
+++ b/cranelift/codegen/src/regalloc/liveness.rs
@@ -7,18 +7,18 @@
 //! # Liveness consumers
 //!
 //! The primary consumer of the liveness analysis is the SSA coloring pass which goes through each
-//! EBB and assigns a register to the defined values. This algorithm needs to maintain a set of the
-//! currently live values as it is iterating down the instructions in the EBB. It asks the
+//! block and assigns a register to the defined values. This algorithm needs to maintain a set of the
+//! currently live values as it is iterating down the instructions in the block. It asks the
 //! following questions:
 //!
-//! - What is the set of live values at the entry to the EBB?
-//! - When moving past a use of a value, is that value still alive in the EBB, or was that the last
+//! - What is the set of live values at the entry to the block?
+//! - When moving past a use of a value, is that value still alive in the block, or was that the last
 //!   use?
 //! - When moving past a branch, which of the live values are still live below the branch?
 //!
 //! The set of `LiveRange` instances can answer these questions through their `def_local_end` and
-//! `livein_local_end` queries. The coloring algorithm visits EBBs in a topological order of the
-//! dominator tree, so it can compute the set of live values at the beginning of an EBB by starting
+//! `livein_local_end` queries. The coloring algorithm visits blocks in a topological order of the
+//! dominator tree, so it can compute the set of live values at the beginning of a block by starting
 //! from the set of live values at the dominating branch instruction and filtering it with
 //! `livein_local_end`. These sets do not need to be stored in the liveness analysis.
 //!
@@ -43,7 +43,7 @@
 //!
 //! - Quadratic memory use. We need a bit per variable per basic block in the function.
 //! - Dense representation of sparse data. In practice, the majority of SSA values never leave
-//!   their basic block, and those that do span basic blocks rarely span a large number of basic
 //!   blocks. This makes the data stored in the bitvectors quite sparse.
 //! - Traditionally, the data-flow equations were solved for real program *variables* which does
 //!   not include temporaries used in evaluating expressions. We have an SSA form program which
@@ -141,10 +141,10 @@
 //! - The first time a value is encountered, its live range is constructed as a dead live range
 //!   containing only the defining program point.
 //! - The local interval of the value's live range is extended so it reaches the use. This may
-//!   require creating a new live-in local interval for the EBB.
-//! - If the live range became live-in to the EBB, add the EBB to a work-list.
-//! - While the work-list is non-empty pop a live-in EBB and repeat the two steps above, using each
-//!   of the live-in EBB's CFG predecessor instructions as a 'use'.
+//!   require creating a new live-in local interval for the block.
+//! - If the live range became live-in to the block, add the block to a work-list.
+//! - While the work-list is non-empty, pop a live-in block and repeat the two steps above, using each
+//!   of the live-in block's CFG predecessor instructions as a 'use'.
 //!
 //! The effect of this algorithm is to extend the live range of each to reach uses as they are
 //! visited. No data about each value beyond the live range is needed between visiting uses, so
@@ -176,9 +176,9 @@
 //! There is some room for improvement.
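
The use-driven extension described in the work-list steps above amounts to a backwards traversal of the CFG. As a rough illustration only (plain indices and a predecessor map; none of this is the `Liveness`/`LiveRange` API), a single def-use pair propagates live-in flags like so:

use std::collections::{HashMap, HashSet};

// Toy back-propagation of liveness from a use to a def, mirroring the
// work-list scheme described above. Blocks are plain indices and the CFG
// is a predecessor map.
fn mark_live_in(
    preds: &HashMap<usize, Vec<usize>>,
    def_block: usize,
    use_block: usize,
) -> HashSet<usize> {
    let mut live_in = HashSet::new();
    let mut worklist = vec![use_block];
    while let Some(block) = worklist.pop() {
        // Stop at the defining block (the value is live *out* of it, not
        // live-in), and skip blocks we have already marked.
        if block == def_block || !live_in.insert(block) {
            continue;
        }
        // The value must also be live at every predecessor branch to `block`.
        if let Some(ps) = preds.get(&block) {
            worklist.extend(ps.iter().copied());
        }
    }
    live_in
}

fn main() {
    // CFG: 0 -> 1 -> 2, with a use in block 2 of a value defined in block 0.
    let mut preds = HashMap::new();
    preds.insert(1, vec![0]);
    preds.insert(2, vec![1]);
    let live = mark_live_in(&preds, 0, 2);
    assert!(live.contains(&1) && live.contains(&2) && !live.contains(&0));
}
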
use crate::entity::SparseMap; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; use crate::ir::dfg::ValueDef; -use crate::ir::{Ebb, Function, Inst, Layout, ProgramPoint, Value}; +use crate::ir::{Block, Function, Inst, Layout, ProgramPoint, Value}; use crate::isa::{EncInfo, OperandConstraint, TargetIsa}; use crate::regalloc::affinity::Affinity; use crate::regalloc::liverange::LiveRange; @@ -223,14 +223,14 @@ fn get_or_create<'a>( }) .unwrap_or_default(); } - ValueDef::Param(ebb, num) => { - def = ebb.into(); - if func.layout.entry_block() == Some(ebb) { + ValueDef::Param(block, num) => { + def = block.into(); + if func.layout.entry_block() == Some(block) { // The affinity for entry block parameters can be inferred from the function // signature. affinity = Affinity::abi(&func.signature.params[num], isa); } else { - // Give normal EBB parameters a register affinity matching their type. + // Give normal block parameters a register affinity matching their type. let rc = isa.regclass_for_abi_type(func.dfg.value_type(value)); affinity = Affinity::Reg(rc.into()); } @@ -241,43 +241,43 @@ fn get_or_create<'a>( lrset.get_mut(value).unwrap() } -/// Extend the live range for `value` so it reaches `to` which must live in `ebb`. +/// Extend the live range for `value` so it reaches `to` which must live in `block`. fn extend_to_use( lr: &mut LiveRange, - ebb: Ebb, + block: Block, to: Inst, - worklist: &mut Vec, + worklist: &mut Vec, func: &Function, cfg: &ControlFlowGraph, ) { // This is our scratch working space, and we'll leave it empty when we return. debug_assert!(worklist.is_empty()); - // Extend the range locally in `ebb`. + // Extend the range locally in `block`. // If there already was a live interval in that block, we're done. - if lr.extend_in_ebb(ebb, to, &func.layout) { - worklist.push(ebb); + if lr.extend_in_block(block, to, &func.layout) { + worklist.push(block); } - // The work list contains those EBBs where we have learned that the value needs to be + // The work list contains those blocks where we have learned that the value needs to be // live-in. // // This algorithm becomes a depth-first traversal up the CFG, enumerating all paths through the - // CFG from the existing live range to `ebb`. + // CFG from the existing live range to `block`. // // Extend the live range as we go. The live range itself also serves as a visited set since - // `extend_in_ebb` will never return true twice for the same EBB. + // `extend_in_block` will never return true twice for the same block. // while let Some(livein) = worklist.pop() { - // We've learned that the value needs to be live-in to the `livein` EBB. + // We've learned that the value needs to be live-in to the `livein` block. // Make sure it is also live at all predecessor branches to `livein`. - for BasicBlock { - ebb: pred, + for BlockPredecessor { + block: pred, inst: branch, } in cfg.pred_iter(livein) { - if lr.extend_in_ebb(pred, branch, &func.layout) { - // This predecessor EBB also became live-in. We need to process it later. + if lr.extend_in_block(pred, branch, &func.layout) { + // This predecessor block also became live-in. We need to process it later. worklist.push(pred); } } @@ -294,7 +294,7 @@ pub struct Liveness { /// Working space for the `extend_to_use` algorithm. /// This vector is always empty, except for inside that function. /// It lives here to avoid repeated allocation of scratch memory. 
- worklist: Vec<Ebb>, + worklist: Vec<Block>, } impl Liveness { @@ -342,7 +342,7 @@ impl Liveness { /// Move the definition of `value` to `def`. /// - /// The old and new def points must be in the same EBB, and before the end of the live range. + /// The old and new def points must be in the same block, and before the end of the live range. pub fn move_def_locally<PP>(&mut self, value: Value, def: PP) where PP: Into<ProgramPoint>, @@ -353,20 +353,20 @@ /// Locally extend the live range for `value` to reach `user`. /// - /// It is assumed the `value` is already live before `user` in `ebb`. + /// It is assumed the `value` is already live before `user` in `block`. /// /// Returns a mutable reference to the value's affinity in case that also needs to be updated. pub fn extend_locally( &mut self, value: Value, - ebb: Ebb, + block: Block, user: Inst, layout: &Layout, ) -> &mut Affinity { - debug_assert_eq!(Some(ebb), layout.inst_ebb(user)); + debug_assert_eq!(Some(block), layout.inst_block(user)); let lr = self.ranges.get_mut(value).expect("Value has no live range"); - let livein = lr.extend_in_ebb(ebb, user, layout); - debug_assert!(!livein, "{} should already be live in {}", value, ebb); + let livein = lr.extend_in_block(block, user, layout); + debug_assert!(!livein, "{} should already be live in {}", value, block); &mut lr.affinity } @@ -389,15 +389,15 @@ impl Liveness { // The liveness computation needs to visit all uses, but the order doesn't matter. // TODO: Perhaps this traversal of the function could be combined with a dead code // elimination pass if we visit a post-order of the dominator tree? - for ebb in func.layout.ebbs() { - // Make sure we have created live ranges for dead EBB parameters. + for block in func.layout.blocks() { + // Make sure we have created live ranges for dead block parameters. // TODO: If these parameters are really dead, we could remove them, except for the // entry block which must match the function signature. - for &arg in func.dfg.ebb_params(ebb) { + for &arg in func.dfg.block_params(block) { get_or_create(&mut self.ranges, arg, isa, func, &encinfo); } - for inst in func.layout.ebb_insts(ebb) { + for inst in func.layout.block_insts(block) { // Eliminate all value aliases, they would confuse the register allocator. func.dfg.resolve_aliases_in_arguments(inst); @@ -419,11 +419,11 @@ impl Liveness { let lr = get_or_create(&mut self.ranges, arg, isa, func, &encinfo); // Extend the live range to reach this use. - extend_to_use(lr, ebb, inst, &mut self.worklist, func, cfg); + extend_to_use(lr, block, inst, &mut self.worklist, func, cfg); // Apply operand constraint, ignoring any variable arguments after the fixed // operands described by `operand_constraints`. Variable arguments are either - // EBB arguments or call/return ABI arguments. + // block arguments or call/return ABI arguments. if let Some(constraint) = operand_constraints.next() { lr.affinity.merge(constraint, &reginfo); } diff --git a/cranelift/codegen/src/regalloc/liverange.rs b/cranelift/codegen/src/regalloc/liverange.rs index f49cbcc682..0e2f8385fc 100644 --- a/cranelift/codegen/src/regalloc/liverange.rs +++ b/cranelift/codegen/src/regalloc/liverange.rs @@ -6,29 +6,29 @@ //! //! # Local Live Ranges //! -//! Inside a single extended basic block, the live range of a value is always an interval between -//! two program points (if the value is live in the EBB at all). The starting point is either: +//! Inside a single basic block, the live range of a value is always an interval between +//! 
two program points (if the value is live in the block at all). The starting point is either: //! //! 1. The instruction that defines the value, or -//! 2. The EBB header, because the value is an argument to the EBB, or -//! 3. The EBB header, because the value is defined in another EBB and live-in to this one. +//! 2. The block header, because the value is an argument to the block, or +//! 3. The block header, because the value is defined in another block and live-in to this one. //! //! The ending point of the local live range is the last of the following program points in the -//! EBB: +//! block: //! -//! 1. The last use in the EBB, where a *use* is an instruction that has the value as an argument. -//! 2. The last branch or jump instruction in the EBB that can reach a use. +//! 1. The last use in the block, where a *use* is an instruction that has the value as an argument. +//! 2. The last branch or jump instruction in the block that can reach a use. //! 3. If the value has no uses anywhere (a *dead value*), the program point that defines it. //! -//! Note that 2. includes loop back-edges to the same EBB. In general, if a value is defined +//! Note that 2. includes loop back-edges to the same block. In general, if a value is defined //! outside a loop and used inside the loop, it will be live in the entire loop. //! //! # Global Live Ranges //! -//! Values that appear in more than one EBB have a *global live range* which can be seen as the -//! disjoint union of the per-EBB local intervals for all of the EBBs where the value is live. -//! Together with a `ProgramOrder` which provides a linear ordering of the EBBs, the global live -//! range becomes a linear sequence of disjoint intervals, at most one per EBB. +//! Values that appear in more than one block have a *global live range* which can be seen as the +//! disjoint union of the per-block local intervals for all of the blocks where the value is live. +//! Together with a `ProgramOrder` which provides a linear ordering of the blocks, the global live +//! range becomes a linear sequence of disjoint intervals, at most one per block. //! //! In the special case of a dead value, the global live range is a single interval where the start //! and end points are the same. The global live range of a value is never completely empty. @@ -64,58 +64,58 @@ //! ## Current representation //! //! Our current implementation uses a sorted array of compressed intervals, represented by their -//! boundaries (Ebb, Inst), sorted by Ebb. This is a simple data structure, enables coalescing of +//! boundaries (Block, Inst), sorted by Block. This is a simple data structure, enables coalescing of //! intervals easily, and shows some nice performance behavior. See //! https://github.com/bytecodealliance/cranelift/issues/1084 for benchmarks against using a -//! bforest::Map<Ebb, Inst>. +//! bforest::Map<Block, Inst>. //! -//! ## EBB ordering +//! ## Block ordering //! -//! The relative order of EBBs is used to maintain a sorted list of live-in intervals and to -//! coalesce adjacent live-in intervals when the prior interval covers the whole EBB. This doesn't +//! The relative order of blocks is used to maintain a sorted list of live-in intervals and to +//! coalesce adjacent live-in intervals when the prior interval covers the whole block. This doesn't //! depend on any property of the program order, so alternative orderings are possible: //! -//! 1. The EBB layout order. This is what we currently use. +//! 1. The block layout order. This is what we currently use. //! 2. 
A topological order of the dominator tree. All the live-in intervals would come after the //! def interval. -//! 3. A numerical order by EBB number. Performant because it doesn't need to indirect through the +//! 3. A numerical order by block number. Performant because it doesn't need to indirect through the //! `ProgramOrder` for comparisons. //! //! These orderings will cause small differences in coalescing opportunities, but all of them would //! do a decent job of compressing a long live range. The numerical order might be preferable //! because: //! -//! - It has better performance because EBB numbers can be compared directly without any table +//! - It has better performance because block numbers can be compared directly without any table //! lookups. -//! - If EBB numbers are not reused, it is safe to allocate new EBBs without getting spurious -//! live-in intervals from any coalesced representations that happen to cross a new EBB. +//! - If block numbers are not reused, it is safe to allocate new blocks without getting spurious +//! live-in intervals from any coalesced representations that happen to cross a new block. //! //! For comparing instructions, the layout order is always what we want. //! //! ## Alternative representation //! -//! Since a local live-in interval always begins at its EBB header, it is uniquely described by its -//! end point instruction alone. We can use the layout to look up the EBB containing the end point. +//! Since a local live-in interval always begins at its block header, it is uniquely described by its +//! end point instruction alone. We can use the layout to look up the block containing the end point. //! This means that a sorted `Vec<Inst>` would be enough to represent the set of live-in intervals. //! //! Coalescing is an important compression technique because some live ranges can span thousands of -//! EBBs. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where -//! an `[Ebb, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding -//! `Ebb` entry represents a single live-in interval. +//! blocks. We can represent that by switching to a sorted `Vec<ProgramPoint>` representation where +//! a `[Block, Inst]` pair represents a coalesced range, while an `Inst` entry without a preceding +//! `Block` entry represents a single live-in interval. //! //! This representation is more compact for a live range with many uncoalesced live-in intervals. //! It is more complicated to work with, though, so it is probably not worth it. The performance -//! benefits of switching to a numerical EBB order only appears if the binary search is doing -//! EBB-EBB comparisons. +//! benefits of switching to a numerical block order only appear if the binary search is doing +//! block-block comparisons. //! -//! A `BTreeMap<Ebb, Inst>` could have been used for the live-in intervals, but it doesn't provide +//! A `BTreeMap<Block, Inst>` could have been used for the live-in intervals, but it doesn't provide //! the necessary API to make coalescing easy, nor does it optimize for our types' sizes. //! -//! Even the specialized `bforest::Map<Ebb, Inst>` implementation is slower than a plain sorted +//! Even the specialized `bforest::Map<Block, Inst>` implementation is slower than a plain sorted //! array, see https://github.com/bytecodealliance/cranelift/issues/1084 for details.
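The compressed-interval lookup that this representation relies on can be shown standalone. A sketch assuming a purely numerical program order (option 3 above), with plain usize program points in place of `Block` and `Inst` (names hypothetical, not the crate's API):

    // Sorted, coalesced live-in intervals as (begin, end) pairs.
    fn lookup_entry_containing(intervals: &[(usize, usize)], block: usize) -> Result<usize, usize> {
        intervals
            .binary_search_by(|&(begin, _)| begin.cmp(&block))
            .or_else(|n| {
                // A miss at insertion point `n` may still land inside the
                // interval that starts before it; check its end point.
                if n > 0 && block <= intervals[n - 1].1 {
                    Ok(n - 1)
                } else {
                    Err(n)
                }
            })
    }

    fn main() {
        // Two coalesced ranges: blocks 20..=41 and 60..=61 are live-in.
        let liveins = [(20, 41), (60, 61)];
        assert_eq!(lookup_entry_containing(&liveins, 30), Ok(0));  // inside the first
        assert_eq!(lookup_entry_containing(&liveins, 50), Err(1)); // between the two
    }

The `or_else` arm is the important detail: a binary search keyed only on interval begin points misses any block covered by the tail of the preceding interval, which is exactly what the previous entry's end check recovers.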
use crate::entity::SparseMapValue; -use crate::ir::{Ebb, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value}; +use crate::ir::{Block, ExpandedProgramPoint, Inst, Layout, ProgramOrder, ProgramPoint, Value}; use crate::regalloc::affinity::Affinity; use core::cmp::Ordering; use core::marker::PhantomData; @@ -124,14 +124,14 @@ use smallvec::SmallVec; /// Global live range of a single SSA value. /// /// As [explained in the module documentation](index.html#local-live-ranges), the live range of an -/// SSA value is the disjoint union of a set of intervals, each local to a single EBB, and with at -/// most one interval per EBB. We further distinguish between: +/// SSA value is the disjoint union of a set of intervals, each local to a single block, and with at +/// most one interval per block. We further distinguish between: /// -/// 1. The *def interval* is the local interval in the EBB where the value is defined, and -/// 2. The *live-in intervals* are the local intervals in the remaining EBBs. +/// 1. The *def interval* is the local interval in the block where the value is defined, and +/// 2. The *live-in intervals* are the local intervals in the remaining blocks. /// -/// A live-in interval always begins at the EBB header, while the def interval can begin at the -/// defining instruction, or at the EBB header for an EBB argument value. +/// A live-in interval always begins at the block header, while the def interval can begin at the +/// defining instruction, or at the block header for a block argument value. /// /// All values have a def interval, but a large proportion of values don't have any live-in /// intervals. These are called *local live ranges*. @@ -139,11 +139,11 @@ use smallvec::SmallVec; /// # Program order requirements /// /// The internal representation of a `LiveRange` depends on a consistent `ProgramOrder` both for -/// ordering instructions inside an EBB *and* for ordering EBBs. The methods that depend on the +/// ordering instructions inside a block *and* for ordering blocks. The methods that depend on the /// ordering take an explicit `ProgramOrder` object, and it is the caller's responsibility to /// ensure that the provided ordering is consistent between calls. /// -/// In particular, changing the order of EBBs or inserting new EBBs will invalidate live ranges. +/// In particular, changing the order of blocks or inserting new blocks will invalidate live ranges. /// /// Inserting new instructions in the layout is safe, but removing instructions is not. Besides the /// instructions using or defining their value, `LiveRange` structs can contain references to @@ -152,7 +152,7 @@ pub type LiveRange = GenericLiveRange<Layout>; // See comment of liveins below. pub struct Interval { - begin: Ebb, + begin: Block, end: Inst, } @@ -168,10 +168,10 @@ pub struct GenericLiveRange<PO: ProgramOrder> { /// The preferred register allocation for this value. pub affinity: Affinity, - /// The instruction or EBB header where this value is defined. + /// The instruction or block header where this value is defined. def_begin: ProgramPoint, - /// The end point of the def interval. This must always belong to the same EBB as `def_begin`. + /// The end point of the def interval. This must always belong to the same block as `def_begin`. /// /// We always have `def_begin <= def_end` with equality implying a dead def live range with no /// uses. @@ -179,12 +179,12 @@ pub struct GenericLiveRange<PO: ProgramOrder> { /// Additional live-in intervals sorted in program order.
/// - /// This vector is empty for most values which are only used in one EBB. + /// This vector is empty for most values which are only used in one block. /// - /// An entry `ebb -> inst` means that the live range is live-in to `ebb`, continuing up to - /// `inst` which may belong to a later EBB in the program order. + /// An entry `block -> inst` means that the live range is live-in to `block`, continuing up to + /// `inst` which may belong to a later block in the program order. /// - /// The entries are non-overlapping, and none of them overlap the EBB where the value is + /// The entries are non-overlapping, and none of them overlap the block where the value is /// defined. liveins: SmallVec<[Interval; 2]>, @@ -210,7 +210,7 @@ macro_rules! cmp { impl<PO: ProgramOrder> GenericLiveRange<PO> { /// Create a new live range for `value` defined at `def`. /// - /// The live range will be created as dead, but it can be extended with `extend_in_ebb()`. + /// The live range will be created as dead, but it can be extended with `extend_in_block()`. pub fn new(value: Value, def: ProgramPoint, affinity: Affinity) -> Self { Self { value, @@ -222,14 +222,14 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { } } - /// Finds an entry in the compressed set of live-in intervals that contains `ebb`, or return + /// Finds an entry in the compressed set of live-in intervals that contains `block`, or returns /// the position at which to insert such a new entry. - fn lookup_entry_containing_ebb(&self, ebb: Ebb, order: &PO) -> Result<usize, usize> { + fn lookup_entry_containing_block(&self, block: Block, order: &PO) -> Result<usize, usize> { self.liveins - .binary_search_by(|interval| order.cmp(interval.begin, ebb)) + .binary_search_by(|interval| order.cmp(interval.begin, block)) .or_else(|n| { - // The previous interval's end might cover the searched ebb. - if n > 0 && cmp!(order, ebb <= self.liveins[n - 1].end) { + // The previous interval's end might cover the searched block. + if n > 0 && cmp!(order, block <= self.liveins[n - 1].end) { Ok(n - 1) } else { Err(n) @@ -237,23 +237,23 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { }) } - /// Extend the local interval for `ebb` so it reaches `to` which must belong to `ebb`. + /// Extend the local interval for `block` so it reaches `to` which must belong to `block`. /// Create a live-in interval if necessary. /// - /// If the live range already has a local interval in `ebb`, extend its end point so it + /// If the live range already has a local interval in `block`, extend its end point so it /// includes `to`, and return false. /// - /// If the live range did not previously have a local interval in `ebb`, add one so the value - /// is live-in to `ebb`, extending to `to`. Return true. + /// If the live range did not previously have a local interval in `block`, add one so the value + /// is live-in to `block`, extending to `to`. Return true. /// /// The return value can be used to detect if we just learned that the value is live-in to - /// `ebb`. This can trigger recursive extensions in `ebb`'s CFG predecessor blocks. - pub fn extend_in_ebb(&mut self, ebb: Ebb, inst: Inst, order: &PO) -> bool { + /// `block`. This can trigger recursive extensions in `block`'s CFG predecessor blocks. + pub fn extend_in_block(&mut self, block: Block, inst: Inst, order: &PO) -> bool { // First check if we're extending the def interval. // - // We're assuming here that `inst` never precedes `def_begin` in the same EBB, but we can't - // check it without a method for getting `inst`'s EBB.
- if cmp!(order, ebb <= self.def_end) && cmp!(order, inst >= self.def_begin) { + // We're assuming here that `inst` never precedes `def_begin` in the same block, but we can't + // check it without a method for getting `inst`'s block. + if cmp!(order, block <= self.def_end) && cmp!(order, inst >= self.def_begin) { let inst_pp = inst.into(); debug_assert_ne!( inst_pp, self.def_begin, @@ -266,7 +266,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { } // Now check if we're extending any of the existing live-in intervals. - match self.lookup_entry_containing_ebb(ebb, order) { + match self.lookup_entry_containing_block(block, order) { Ok(n) => { // We found one interval and might need to extend it. if cmp!(order, inst <= self.liveins[n].end) { @@ -278,7 +278,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { // coalesce the two intervals: // [ival.begin; ival.end] + [next.begin; next.end] = [ival.begin; next.end] if let Some(next) = &self.liveins.get(n + 1) { - if order.is_ebb_gap(inst, next.begin) { + if order.is_block_gap(inst, next.begin) { // At this point we can choose to remove the current interval or the next // one; remove the next one to avoid one memory move. let next_end = next.end; @@ -295,17 +295,17 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { } Err(n) => { - // No interval was found containing the current EBB: we need to insert a new one, + // No interval was found containing the current block: we need to insert a new one, // unless there's a coalescing opportunity with the previous or next one. let coalesce_next = self .liveins .get(n) - .filter(|next| order.is_ebb_gap(inst, next.begin)) + .filter(|next| order.is_block_gap(inst, next.begin)) .is_some(); let coalesce_prev = self .liveins .get(n.wrapping_sub(1)) - .filter(|prev| order.is_ebb_gap(prev.end, ebb)) + .filter(|prev| order.is_block_gap(prev.end, block)) .is_some(); match (coalesce_prev, coalesce_next) { @@ -324,8 +324,8 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { self.liveins[n - 1].end = inst; } (false, true) => { - debug_assert!(cmp!(order, ebb <= self.liveins[n].begin)); - self.liveins[n].begin = ebb; + debug_assert!(cmp!(order, block <= self.liveins[n].begin)); + self.liveins[n].begin = block; } (false, false) => { @@ -333,7 +333,7 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { self.liveins.insert( n, Interval { - begin: ebb, + begin: block, end: inst, }, ); @@ -355,15 +355,15 @@ /// Is this a local live range? /// - /// A local live range is only used in the same EBB where it was defined. It is allowed to span - /// multiple basic blocks within that EBB. + /// A local live range is only used in the same block where it was defined. pub fn is_local(&self) -> bool { self.liveins.is_empty() } /// Get the program point where this live range is defined. /// - /// This will be an EBB header when the value is an EBB argument, otherwise it is the defining + /// This will be a block header when the value is a block argument, otherwise it is the defining /// instruction. pub fn def(&self) -> ProgramPoint { self.def_begin } @@ -371,33 +371,33 @@ /// Move the definition of this value to a new program point. /// - /// It is only valid to move the definition within the same EBB, and it can't be moved beyond + /// It is only valid to move the definition within the same block, and it can't be moved beyond /// `def_local_end()`. pub fn move_def_locally(&mut self, def: ProgramPoint) { self.def_begin = def; } - /// Get the local end-point of this live range in the EBB where it is defined.
+ /// Get the local end-point of this live range in the block where it is defined. /// - /// This can be the EBB header itself in the case of a dead EBB argument. + /// This can be the block header itself in the case of a dead block argument. /// Otherwise, it will be the last local use or branch/jump that can reach a use. pub fn def_local_end(&self) -> ProgramPoint { self.def_end } - /// Get the local end-point of this live range in an EBB where it is live-in. + /// Get the local end-point of this live range in a block where it is live-in. /// - /// If this live range is not live-in to `ebb`, return `None`. Otherwise, return the end-point - /// of this live range's local interval in `ebb`. + /// If this live range is not live-in to `block`, return `None`. Otherwise, return the end-point + /// of this live range's local interval in `block`. /// - /// If the live range is live through all of `ebb`, the terminator of `ebb` is a correct + /// If the live range is live through all of `block`, the terminator of `block` is a correct /// answer, but it is also possible that an even later program point is returned. So don't - /// depend on the returned `Inst` to belong to `ebb`. - pub fn livein_local_end(&self, ebb: Ebb, order: &PO) -> Option<Inst> { - self.lookup_entry_containing_ebb(ebb, order) + /// depend on the returned `Inst` to belong to `block`. + pub fn livein_local_end(&self, block: Block, order: &PO) -> Option<Inst> { + self.lookup_entry_containing_block(block, order) .and_then(|i| { let inst = self.liveins[i].end; - if cmp!(order, ebb < inst) { + if cmp!(order, block < inst) { Ok(inst) } else { // Can be any error type, really, since it's discarded by ok(). @@ -407,25 +407,25 @@ impl<PO: ProgramOrder> GenericLiveRange<PO> { .ok() } - /// Is this value live-in to `ebb`? + /// Is this value live-in to `block`? /// - /// An EBB argument is not considered to be live in. - pub fn is_livein(&self, ebb: Ebb, order: &PO) -> bool { - self.livein_local_end(ebb, order).is_some() + /// A block argument is not considered to be live in. + pub fn is_livein(&self, block: Block, order: &PO) -> bool { + self.livein_local_end(block, order).is_some() } /// Get all the live-in intervals. /// /// Note that the intervals are stored in a compressed form so each entry may span multiple - /// EBBs where the value is live in. - pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Ebb, Inst)> + 'a { + /// blocks where the value is live in. + pub fn liveins<'a>(&'a self) -> impl Iterator<Item = (Block, Inst)> + 'a { self.liveins .iter() .map(|interval| (interval.begin, interval.end)) } - /// Check if this live range overlaps a definition in `ebb`. - pub fn overlaps_def(&self, def: ExpandedProgramPoint, ebb: Ebb, order: &PO) -> bool { + /// Check if this live range overlaps a definition in `block`. + pub fn overlaps_def(&self, def: ExpandedProgramPoint, block: Block, order: &PO) -> bool { // Two defs at the same program point always overlap, even if one is dead. if def == self.def_begin.into() { return true; } @@ -437,29 +437,29 @@ } // Check for an overlap with a live-in range. - match self.livein_local_end(ebb, order) { + match self.livein_local_end(block, order) { Some(inst) => cmp!(order, def < inst), None => false, } } - /// Check if this live range reaches a use at `user` in `ebb`. - pub fn reaches_use(&self, user: Inst, ebb: Ebb, order: &PO) -> bool { + /// Check if this live range reaches a use at `user` in `block`. + pub fn reaches_use(&self, user: Inst, block: Block, order: &PO) -> bool { // Check for an overlap with the local range.
if cmp!(order, user > self.def_begin) && cmp!(order, user <= self.def_end) { return true; } // Check for an overlap with a live-in range. - match self.livein_local_end(ebb, order) { + match self.livein_local_end(block, order) { Some(inst) => cmp!(order, user <= inst), None => false, } } - /// Check if this live range is killed at `user` in `ebb`. - pub fn killed_at(&self, user: Inst, ebb: Ebb, order: &PO) -> bool { - self.def_local_end() == user.into() || self.livein_local_end(ebb, order) == Some(user) + /// Check if this live range is killed at `user` in `block`. + pub fn killed_at(&self, user: Inst, block: Block, order: &PO) -> bool { + self.def_local_end() == user.into() || self.livein_local_end(block, order) == Some(user) } } @@ -474,15 +474,15 @@ impl<PO: ProgramOrder> SparseMapValue<Value> for GenericLiveRange<PO> { mod tests { use super::{GenericLiveRange, Interval}; use crate::entity::EntityRef; - use crate::ir::{Ebb, Inst, Value}; + use crate::ir::{Block, Inst, Value}; use crate::ir::{ExpandedProgramPoint, ProgramOrder}; use alloc::vec::Vec; use core::cmp::Ordering; // Dummy program order which simply compares indexes. - // It is assumed that EBBs have indexes that are multiples of 10, and instructions have indexes - // in between. `is_ebb_gap` assumes that terminator instructions have indexes of the form - // ebb * 10 + 1. This is used in the coalesce test. + // It is assumed that blocks have indexes that are multiples of 10, and instructions have indexes + // in between. `is_block_gap` assumes that terminator instructions have indexes of the form + // block * 10 + 1. This is used in the coalesce test. struct ProgOrder {} impl ProgramOrder for ProgOrder { @@ -494,7 +494,7 @@ mod tests { fn idx(pp: ExpandedProgramPoint) -> usize { match pp { ExpandedProgramPoint::Inst(i) => i.index(), - ExpandedProgramPoint::Ebb(e) => e.index(), + ExpandedProgramPoint::Block(e) => e.index(), } } @@ -503,31 +503,31 @@ mod tests { ia.cmp(&ib) } - fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool { - inst.index() % 10 == 1 && ebb.index() / 10 == inst.index() / 10 + 1 + fn is_block_gap(&self, inst: Inst, block: Block) -> bool { + inst.index() % 10 == 1 && block.index() / 10 == inst.index() / 10 + 1 } } impl ProgOrder { - // Get the EBB corresponding to `inst`. - fn inst_ebb(&self, inst: Inst) -> Ebb { + // Get the block corresponding to `inst`. + fn inst_block(&self, inst: Inst) -> Block { let i = inst.index(); - Ebb::new(i - i % 10) + Block::new(i - i % 10) } - // Get the EBB of a program point. - fn pp_ebb<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Ebb { + // Get the block of a program point. + fn pp_block<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> Block { match pp.into() { - ExpandedProgramPoint::Inst(i) => self.inst_ebb(i), - ExpandedProgramPoint::Ebb(e) => e, + ExpandedProgramPoint::Inst(i) => self.inst_block(i), + ExpandedProgramPoint::Block(e) => e, } } // Validate the live range invariants. fn validate(&self, lr: &GenericLiveRange<ProgOrder>) { - // The def interval must cover a single EBB. - let def_ebb = self.pp_ebb(lr.def_begin); - assert_eq!(def_ebb, self.pp_ebb(lr.def_end)); + // The def interval must cover a single block. - let def_block = self.pp_block(lr.def_begin); + assert_eq!(def_block, self.pp_block(lr.def_end)); // Check that the def interval isn't backwards. match self.cmp(lr.def_begin, lr.def_end) { @@ -552,7 +552,7 @@ mod tests { assert!( self.cmp(lr.def_end, begin) == Ordering::Less || self.cmp(lr.def_begin, end) == Ordering::Greater, - "Interval can't overlap the def EBB" + "Interval can't overlap the def block" ); // Save for next round.
@@ -567,10 +567,10 @@ #[test] fn dead_def_range() { let v0 = Value::new(0); - let e0 = Ebb::new(0); + let e0 = Block::new(0); let i1 = Inst::new(1); let i2 = Inst::new(2); - let e2 = Ebb::new(2); + let e2 = Block::new(2); let lr = GenericLiveRange::new(v0, i1.into(), Default::default()); assert!(lr.is_dead()); assert!(lr.is_local()); @@ -588,13 +588,13 @@ #[test] fn dead_arg_range() { let v0 = Value::new(0); - let e2 = Ebb::new(2); + let e2 = Block::new(2); let lr = GenericLiveRange::new(v0, e2.into(), Default::default()); assert!(lr.is_dead()); assert!(lr.is_local()); assert_eq!(lr.def(), e2.into()); assert_eq!(lr.def_local_end(), e2.into()); - // The def interval of an EBB argument does not count as live-in. + // The def interval of a block argument does not count as live-in. assert_eq!(lr.livein_local_end(e2, PO), None); PO.validate(&lr); } @@ -602,13 +602,13 @@ #[test] fn local_def() { let v0 = Value::new(0); - let e10 = Ebb::new(10); + let e10 = Block::new(10); let i11 = Inst::new(11); let i12 = Inst::new(12); let i13 = Inst::new(13); let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default()); - assert_eq!(lr.extend_in_ebb(e10, i13, PO), false); + assert_eq!(lr.extend_in_block(e10, i13, PO), false); PO.validate(&lr); assert!(!lr.is_dead()); assert!(lr.is_local()); @@ -616,7 +616,7 @@ assert_eq!(lr.def_local_end(), i13.into()); // Extending to an already covered inst should not change anything. - assert_eq!(lr.extend_in_ebb(e10, i12, PO), false); + assert_eq!(lr.extend_in_block(e10, i12, PO), false); PO.validate(&lr); assert_eq!(lr.def(), i11.into()); assert_eq!(lr.def_local_end(), i13.into()); @@ -625,15 +625,15 @@ #[test] fn local_arg() { let v0 = Value::new(0); - let e10 = Ebb::new(10); + let e10 = Block::new(10); let i11 = Inst::new(11); let i12 = Inst::new(12); let i13 = Inst::new(13); let mut lr = GenericLiveRange::new(v0, e10.into(), Default::default()); - // Extending a dead EBB argument in its own block should not indicate that a live-in + // Extending a dead block argument in its own block should not indicate that a live-in // interval was created. - assert_eq!(lr.extend_in_ebb(e10, i12, PO), false); + assert_eq!(lr.extend_in_block(e10, i12, PO), false); PO.validate(&lr); assert!(!lr.is_dead()); assert!(lr.is_local()); @@ -641,13 +641,13 @@ assert_eq!(lr.def_local_end(), i12.into()); // Extending to an already covered inst should not change anything. - assert_eq!(lr.extend_in_ebb(e10, i11, PO), false); + assert_eq!(lr.extend_in_block(e10, i11, PO), false); PO.validate(&lr); assert_eq!(lr.def(), e10.into()); assert_eq!(lr.def_local_end(), i12.into()); // Extending further. - assert_eq!(lr.extend_in_ebb(e10, i13, PO), false); + assert_eq!(lr.extend_in_block(e10, i13, PO), false); PO.validate(&lr); assert_eq!(lr.def(), e10.into()); assert_eq!(lr.def_local_end(), i13.into()); @@ -656,28 +656,28 @@ #[test] fn global_def() { let v0 = Value::new(0); - let e10 = Ebb::new(10); + let e10 = Block::new(10); let i11 = Inst::new(11); let i12 = Inst::new(12); - let e20 = Ebb::new(20); + let e20 = Block::new(20); let i21 = Inst::new(21); let i22 = Inst::new(22); let i23 = Inst::new(23); let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default()); - assert_eq!(lr.extend_in_ebb(e10, i12, PO), false); + assert_eq!(lr.extend_in_block(e10, i12, PO), false); // Adding a live-in interval.
- assert_eq!(lr.extend_in_ebb(e20, i22, PO), true); + assert_eq!(lr.extend_in_block(e20, i22, PO), true); PO.validate(&lr); assert_eq!(lr.livein_local_end(e20, PO), Some(i22)); // Non-extending the live-in. - assert_eq!(lr.extend_in_ebb(e20, i21, PO), false); + assert_eq!(lr.extend_in_block(e20, i21, PO), false); assert_eq!(lr.livein_local_end(e20, PO), Some(i22)); // Extending the existing live-in. - assert_eq!(lr.extend_in_ebb(e20, i23, PO), false); + assert_eq!(lr.extend_in_block(e20, i23, PO), false); PO.validate(&lr); assert_eq!(lr.livein_local_end(e20, PO), Some(i23)); } @@ -686,35 +686,35 @@ fn coalesce() { let v0 = Value::new(0); let i11 = Inst::new(11); - let e20 = Ebb::new(20); + let e20 = Block::new(20); let i21 = Inst::new(21); - let e30 = Ebb::new(30); + let e30 = Block::new(30); let i31 = Inst::new(31); - let e40 = Ebb::new(40); + let e40 = Block::new(40); let i41 = Inst::new(41); let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default()); - assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true); + assert_eq!(lr.extend_in_block(e30, i31, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i31)]); // Coalesce to previous - assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true); + assert_eq!(lr.extend_in_block(e40, i41, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e30, i41)]); // Coalesce to next - assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true); + assert_eq!(lr.extend_in_block(e20, i21, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]); let mut lr = GenericLiveRange::new(v0, i11.into(), Default::default()); - assert_eq!(lr.extend_in_ebb(e40, i41, PO,), true); + assert_eq!(lr.extend_in_block(e40, i41, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e40, i41)]); - assert_eq!(lr.extend_in_ebb(e20, i21, PO,), true); + assert_eq!(lr.extend_in_block(e20, i21, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i21), (e40, i41)]); // Coalesce to previous and next - assert_eq!(lr.extend_in_ebb(e30, i31, PO,), true); + assert_eq!(lr.extend_in_block(e30, i31, PO,), true); assert_eq!(lr.liveins().collect::<Vec<_>>(), [(e20, i41)]); } } diff --git a/cranelift/codegen/src/regalloc/reload.rs b/cranelift/codegen/src/regalloc/reload.rs index 0f4f595346..cdafb68af8 100644 --- a/cranelift/codegen/src/regalloc/reload.rs +++ b/cranelift/codegen/src/regalloc/reload.rs @@ -13,7 +13,7 @@ use crate::cursor::{Cursor, EncCursor}; use crate::dominator_tree::DominatorTree; use crate::entity::{SparseMap, SparseMapValue}; use crate::ir::{AbiParam, ArgumentLoc, InstBuilder}; -use crate::ir::{Ebb, Function, Inst, InstructionData, Opcode, Value, ValueLoc}; +use crate::ir::{Block, Function, Inst, InstructionData, Opcode, Value, ValueLoc}; use crate::isa::RegClass; use crate::isa::{ConstraintKind, EncInfo, Encoding, RecipeConstraints, TargetIsa}; use crate::regalloc::affinity::Affinity; @@ -113,24 +113,24 @@ impl SparseMapValue<Value> for ReloadedValue { impl<'a> Context<'a> { fn run(&mut self, tracker: &mut LiveValueTracker) { - self.topo.reset(self.cur.func.layout.ebbs()); - while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) { - self.visit_ebb(ebb, tracker); + self.topo.reset(self.cur.func.layout.blocks()); + while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) { + self.visit_block(block, tracker); } } - fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) { - debug!("Reloading {}:", ebb); - self.visit_ebb_header(ebb, tracker); + fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) { + 
debug!("Reloading {}:", block); + self.visit_block_header(block, tracker); tracker.drop_dead_params(); - // visit_ebb_header() places us at the first interesting instruction in the EBB. + // visit_block_header() places us at the first interesting instruction in the block. while let Some(inst) = self.cur.current_inst() { if !self.cur.func.dfg[inst].opcode().is_ghost() { // This instruction either has an encoding or has ABI constraints, so visit it to // insert spills and fills as needed. let encoding = self.cur.func.encodings[inst]; - self.visit_inst(ebb, inst, encoding, tracker); + self.visit_inst(block, inst, encoding, tracker); tracker.drop_dead(inst); } else { // This is a ghost instruction with no encoding and no extra constraints, so we can @@ -140,29 +140,29 @@ impl<'a> Context<'a> { } } - /// Process the EBB parameters. Move to the next instruction in the EBB to be processed - fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) { - let (liveins, args) = tracker.ebb_top( - ebb, + /// Process the block parameters. Move to the next instruction in the block to be processed + fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) { + let (liveins, args) = tracker.block_top( + block, &self.cur.func.dfg, self.liveness, &self.cur.func.layout, self.domtree, ); - if self.cur.func.layout.entry_block() == Some(ebb) { + if self.cur.func.layout.entry_block() == Some(block) { debug_assert_eq!(liveins.len(), 0); - self.visit_entry_params(ebb, args); + self.visit_entry_params(block, args); } else { - self.visit_ebb_params(ebb, args); + self.visit_block_params(block, args); } } /// Visit the parameters on the entry block. /// These values have ABI constraints from the function signature. - fn visit_entry_params(&mut self, ebb: Ebb, args: &[LiveValue]) { + fn visit_entry_params(&mut self, block: Block, args: &[LiveValue]) { debug_assert_eq!(self.cur.func.signature.params.len(), args.len()); - self.cur.goto_first_inst(ebb); + self.cur.goto_first_inst(block); for (arg_idx, arg) in args.iter().enumerate() { let abi = self.cur.func.signature.params[arg_idx]; @@ -175,10 +175,10 @@ impl<'a> Context<'a> { .cur .func .dfg - .replace_ebb_param(arg.value, abi.value_type); + .replace_block_param(arg.value, abi.value_type); let affinity = Affinity::abi(&abi, self.cur.isa); - self.liveness.create_dead(reg, ebb, affinity); - self.insert_spill(ebb, arg.value, reg); + self.liveness.create_dead(reg, block, affinity); + self.insert_spill(block, arg.value, reg); } } ArgumentLoc::Stack(_) => { @@ -189,15 +189,15 @@ impl<'a> Context<'a> { } } - fn visit_ebb_params(&mut self, ebb: Ebb, _args: &[LiveValue]) { - self.cur.goto_first_inst(ebb); + fn visit_block_params(&mut self, block: Block, _args: &[LiveValue]) { + self.cur.goto_first_inst(block); } /// Process the instruction pointed to by `pos`, and advance the cursor to the next instruction /// that needs processing. fn visit_inst( &mut self, - ebb: Ebb, + block: Block, inst: Inst, encoding: Encoding, tracker: &mut LiveValueTracker, @@ -265,7 +265,7 @@ impl<'a> Context<'a> { { self.reload_copy_candidates(inst); } else { - self.reload_inst_candidates(ebb, inst); + self.reload_inst_candidates(block, inst); } // TODO: Reuse reloads for future instructions. 
@@ -304,7 +304,7 @@ impl<'a> Context<'a> { let value_type = self.cur.func.dfg.value_type(lv.value); let reg = self.cur.func.dfg.replace_result(lv.value, value_type); self.liveness.create_dead(reg, inst, Affinity::new(op)); - self.insert_spill(ebb, lv.value, reg); + self.insert_spill(block, lv.value, reg); } } } @@ -333,14 +333,14 @@ impl<'a> Context<'a> { let reg = self.cur.func.dfg.replace_result(lv.value, abi.value_type); self.liveness .create_dead(reg, inst, Affinity::abi(&abi, self.cur.isa)); - self.insert_spill(ebb, lv.value, reg); + self.insert_spill(block, lv.value, reg); } } } } // Reload the current candidates for the given `inst`. - fn reload_inst_candidates(&mut self, ebb: Ebb, inst: Inst) { + fn reload_inst_candidates(&mut self, block: Block, inst: Inst) { // Insert fill instructions before `inst` and replace `cand.value` with the filled value. for cand in self.candidates.iter_mut() { if let Some(reload) = self.reloads.get(cand.value) { @@ -361,15 +361,15 @@ impl<'a> Context<'a> { let affinity = Affinity::Reg(cand.regclass.into()); self.liveness.create_dead(reg, fill, affinity); self.liveness - .extend_locally(reg, ebb, inst, &self.cur.func.layout); + .extend_locally(reg, block, inst, &self.cur.func.layout); } // Rewrite instruction arguments. // - // Only rewrite those arguments that were identified as candidates. This leaves EBB - // arguments on branches as-is without rewriting them. A spilled EBB argument needs to stay - // spilled because the matching EBB parameter is going to be in the same virtual register - // and therefore the same stack slot as the EBB argument value. + // Only rewrite those arguments that were identified as candidates. This leaves block + // arguments on branches as-is without rewriting them. A spilled block argument needs to stay + // spilled because the matching block parameter is going to be in the same virtual register + // and therefore the same stack slot as the block argument value. if !self.candidates.is_empty() { let args = self.cur.func.dfg.inst_args_mut(inst); while let Some(cand) = self.candidates.pop() { @@ -448,14 +448,14 @@ impl<'a> Context<'a> { /// - Insert `stack = spill reg` at `pos`, and assign an encoding. /// - Move the `stack` live range starting point to the new instruction. /// - Extend the `reg` live range to reach the new instruction. - fn insert_spill(&mut self, ebb: Ebb, stack: Value, reg: Value) { + fn insert_spill(&mut self, block: Block, stack: Value, reg: Value) { self.cur.ins().with_result(stack).spill(reg); let inst = self.cur.built_inst(); // Update live ranges. self.liveness.move_def_locally(stack, inst); self.liveness - .extend_locally(reg, ebb, inst, &self.cur.func.layout); + .extend_locally(reg, block, inst, &self.cur.func.layout); } } diff --git a/cranelift/codegen/src/regalloc/safepoint.rs b/cranelift/codegen/src/regalloc/safepoint.rs index ba846190f3..128900d360 100644 --- a/cranelift/codegen/src/regalloc/safepoint.rs +++ b/cranelift/codegen/src/regalloc/safepoint.rs @@ -32,7 +32,7 @@ fn insert_and_encode_safepoint<'f>( } // The emit_stackmaps() function analyzes each instruction to retrieve the liveness of -// the defs and operands by traversing a function's ebbs in layout order. +// the defs and operands by traversing a function's blocks in layout order. 
pub fn emit_stackmaps( func: &mut Function, domtree: &DominatorTree, @@ -42,13 +42,13 @@ pub fn emit_stackmaps( ) { let mut curr = func.layout.entry_block(); - while let Some(ebb) = curr { - tracker.ebb_top(ebb, &func.dfg, liveness, &func.layout, domtree); + while let Some(block) = curr { + tracker.block_top(block, &func.dfg, liveness, &func.layout, domtree); tracker.drop_dead_params(); let mut pos = FuncCursor::new(func); - // From the top of the ebb, step through the instructions. - pos.goto_top(ebb); + // From the top of the block, step through the instructions. + pos.goto_top(block); while let Some(inst) = pos.next_inst() { if let InstructionData::Trap { @@ -67,6 +67,6 @@ pub fn emit_stackmaps( tracker.process_inst(inst, &pos.func.dfg, liveness); tracker.drop_dead(inst); } - curr = func.layout.next_ebb(ebb); + curr = func.layout.next_block(block); } } diff --git a/cranelift/codegen/src/regalloc/solver.rs b/cranelift/codegen/src/regalloc/solver.rs index 96ce702fdc..7416ec9bc7 100644 --- a/cranelift/codegen/src/regalloc/solver.rs +++ b/cranelift/codegen/src/regalloc/solver.rs @@ -34,20 +34,20 @@ //! # Register diversions and global interference //! //! We can divert register values temporarily to satisfy constraints, but we need to put the -//! values back into their originally assigned register locations before leaving the EBB. -//! Otherwise, values won't be in the right register at the entry point of other EBBs. +//! values back into their originally assigned register locations before leaving the block. +//! Otherwise, values won't be in the right register at the entry point of other blocks. //! //! Some values are *local*, and we don't need to worry about putting those values back since they -//! are not used in any other EBBs. +//! are not used in any other blocks. //! //! When we assign register locations to defines, we are assigning both the register used locally //! immediately after the instruction and the register used globally when the defined value is used -//! in a different EBB. We need to avoid interference both locally at the instruction and globally. +//! in a different block. We need to avoid interference both locally at the instruction and globally. //! //! We have multiple mappings of values to registers: //! //! 1. The initial local mapping before the instruction. This includes any diversions from previous -//! instructions in the EBB, but not diversions for the current instruction. +//! instructions in the block, but not diversions for the current instruction. //! 2. The local mapping after applying the additional reassignments required to satisfy the //! constraints of the current instruction. //! 3. The local mapping after the instruction. 
This excludes values killed by the instruction and diff --git a/cranelift/codegen/src/regalloc/spilling.rs b/cranelift/codegen/src/regalloc/spilling.rs index e7127b9606..d27c68ae42 100644 --- a/cranelift/codegen/src/regalloc/spilling.rs +++ b/cranelift/codegen/src/regalloc/spilling.rs @@ -17,7 +17,7 @@ use crate::cursor::{Cursor, EncCursor}; use crate::dominator_tree::DominatorTree; -use crate::ir::{ArgumentLoc, Ebb, Function, Inst, InstBuilder, SigRef, Value, ValueLoc}; +use crate::ir::{ArgumentLoc, Block, Function, Inst, InstBuilder, SigRef, Value, ValueLoc}; use crate::isa::registers::{RegClass, RegClassIndex, RegClassMask, RegUnit}; use crate::isa::{ConstraintKind, EncInfo, RecipeConstraints, RegInfo, TargetIsa}; use crate::regalloc::affinity::Affinity; @@ -121,22 +121,22 @@ impl Spilling { impl<'a> Context<'a> { fn run(&mut self, tracker: &mut LiveValueTracker) { - self.topo.reset(self.cur.func.layout.ebbs()); - while let Some(ebb) = self.topo.next(&self.cur.func.layout, self.domtree) { - self.visit_ebb(ebb, tracker); + self.topo.reset(self.cur.func.layout.blocks()); + while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) { + self.visit_block(block, tracker); } } - fn visit_ebb(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) { - debug!("Spilling {}:", ebb); - self.cur.goto_top(ebb); - self.visit_ebb_header(ebb, tracker); + fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) { + debug!("Spilling {}:", block); + self.cur.goto_top(block); + self.visit_block_header(block, tracker); tracker.drop_dead_params(); self.process_spills(tracker); while let Some(inst) = self.cur.next_inst() { if !self.cur.func.dfg[inst].opcode().is_ghost() { - self.visit_inst(inst, ebb, tracker); + self.visit_inst(inst, block, tracker); } else { let (_throughs, kills) = tracker.process_ghost(inst); self.free_regs(kills); @@ -185,9 +185,9 @@ impl<'a> Context<'a> { } } - fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) { - let (liveins, params) = tracker.ebb_top( - ebb, + fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) { + let (liveins, params) = tracker.block_top( + block, &self.cur.func.dfg, self.liveness, &self.cur.func.layout, @@ -199,26 +199,26 @@ impl<'a> Context<'a> { self.pressure.reset(); self.take_live_regs(liveins); - // An EBB can have an arbitrary (up to 2^16...) number of parameters, so they are not + // A block can have an arbitrary (up to 2^16...) number of parameters, so they are not // guaranteed to fit in registers. for lv in params { if let Affinity::Reg(rci) = lv.affinity { let rc = self.reginfo.rc(rci); 'try_take: while let Err(mask) = self.pressure.take_transient(rc) { - debug!("Need {} reg for EBB param {}", rc, lv.value); + debug!("Need {} reg for block param {}", rc, lv.value); match self.spill_candidate(mask, liveins) { Some(cand) => { debug!( - "Spilling live-in {} to make room for {} EBB param {}", + "Spilling live-in {} to make room for {} block param {}", cand, rc, lv.value ); self.spill_reg(cand); } None => { // We can't spill any of the live-in registers, so we have to spill an - // EBB argument. Since the current spill metric would consider all the - // EBB arguments equal, just spill the present register. - debug!("Spilling {} EBB argument {}", rc, lv.value); + // block argument. Since the current spill metric would consider all the + // block arguments equal, just spill the present register. 
+ debug!("Spilling {} block argument {}", rc, lv.value); // Since `spill_reg` will free a register, add the current one here. self.pressure.take(rc); @@ -230,15 +230,15 @@ impl<'a> Context<'a> { } } - // The transient pressure counts for the EBB arguments are accurate. Just preserve them. + // The transient pressure counts for the block arguments are accurate. Just preserve them. self.pressure.preserve_transient(); self.free_dead_regs(params); } - fn visit_inst(&mut self, inst: Inst, ebb: Ebb, tracker: &mut LiveValueTracker) { + fn visit_inst(&mut self, inst: Inst, block: Block, tracker: &mut LiveValueTracker) { debug!("Inst {}, {}", self.cur.display_inst(inst), self.pressure); debug_assert_eq!(self.cur.current_inst(), Some(inst)); - debug_assert_eq!(self.cur.current_ebb(), Some(ebb)); + debug_assert_eq!(self.cur.current_block(), Some(block)); let constraints = self .encinfo @@ -246,7 +246,7 @@ impl<'a> Context<'a> { // We may need to resolve register constraints if there are any noteworthy uses. debug_assert!(self.reg_uses.is_empty()); - self.collect_reg_uses(inst, ebb, constraints); + self.collect_reg_uses(inst, block, constraints); // Calls usually have fixed register uses. let call_sig = self.cur.func.dfg.call_signature(inst); @@ -313,7 +313,12 @@ impl<'a> Context<'a> { // We are assuming here that if a value is used both by a fixed register operand and a register // class operand, they two are compatible. We are also assuming that two register class // operands are always compatible. - fn collect_reg_uses(&mut self, inst: Inst, ebb: Ebb, constraints: Option<&RecipeConstraints>) { + fn collect_reg_uses( + &mut self, + inst: Inst, + block: Block, + constraints: Option<&RecipeConstraints>, + ) { let args = self.cur.func.dfg.inst_args(inst); let num_fixed_ins = if let Some(constraints) = constraints { for (idx, (op, &arg)) in constraints.ins.iter().zip(args).enumerate() { @@ -324,11 +329,11 @@ impl<'a> Context<'a> { ConstraintKind::FixedReg(_) => reguse.fixed = true, ConstraintKind::Tied(_) => { // A tied operand must kill the used value. - reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout); + reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout); } ConstraintKind::FixedTied(_) => { reguse.fixed = true; - reguse.tied = !lr.killed_at(inst, ebb, &self.cur.func.layout); + reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout); } ConstraintKind::Reg => {} } @@ -450,10 +455,10 @@ impl<'a> Context<'a> { // Spill a live register that is *not* used by the current instruction. // Spilling a use wouldn't help. // - // Do allow spilling of EBB arguments on branches. This is safe since we spill - // the whole virtual register which includes the matching EBB parameter value + // Do allow spilling of block arguments on branches. This is safe since we spill + // the whole virtual register which includes the matching block parameter value // at the branch destination. It is also necessary since there can be - // arbitrarily many EBB arguments. + // arbitrarily many block arguments. 
match { let args = if self.cur.func.dfg[inst].opcode().is_branch() { self.cur.func.dfg.inst_fixed_args(inst) } else { @@ -572,7 +577,7 @@ impl<'a> Context<'a> { self.liveness.create_dead(copy, inst, Affinity::Reg(rci)); self.liveness.extend_locally( copy, - self.cur.func.layout.pp_ebb(inst), + self.cur.func.layout.pp_block(inst), self.cur.current_inst().expect("must be at an instruction"), &self.cur.func.layout, ); diff --git a/cranelift/codegen/src/regalloc/virtregs.rs b/cranelift/codegen/src/regalloc/virtregs.rs index 8e1f4a0276..28af9e22df 100644 --- a/cranelift/codegen/src/regalloc/virtregs.rs +++ b/cranelift/codegen/src/regalloc/virtregs.rs @@ -5,11 +5,11 @@ //! output. //! //! A virtual register is typically built by merging together SSA values that are "phi-related" - -//! that is, one value is passed as an EBB argument to a branch and the other is the EBB parameter +//! that is, one value is passed as a block argument to a branch and the other is the block parameter //! value itself. //! //! If any values in a virtual register are spilled, they will use the same stack slot. This avoids -//! memory-to-memory copies when a spilled value is passed as an EBB argument. +//! memory-to-memory copies when a spilled value is passed as a block argument. use crate::dbg::DisplayList; use crate::dominator_tree::DominatorTreePreorder; diff --git a/cranelift/codegen/src/simple_gvn.rs b/cranelift/codegen/src/simple_gvn.rs index 60771d4cd8..5351aced43 100644 --- a/cranelift/codegen/src/simple_gvn.rs +++ b/cranelift/codegen/src/simple_gvn.rs @@ -59,7 +59,7 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) { let _tt = timing::gvn(); debug_assert!(domtree.is_valid()); - // Visit EBBs in a reverse post-order. + // Visit blocks in a reverse post-order. // // The RefCell here is a bit ugly since the HashKeys in the ScopedHashMap // need a reference to the function. @@ -68,13 +68,13 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) { let mut visible_values: ScopedHashMap = ScopedHashMap::new(); let mut scope_stack: Vec<Inst> = Vec::new(); - for &ebb in domtree.cfg_postorder().iter().rev() { + for &block in domtree.cfg_postorder().iter().rev() { { // Pop any scopes that we just exited. let layout = &pos.borrow().func.layout; loop { if let Some(current) = scope_stack.last() { - if domtree.dominates(*current, ebb, layout) { + if domtree.dominates(*current, block, layout) { break; } } else { @@ -85,11 +85,11 @@ pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) { } // Push a scope for the current block. - scope_stack.push(layout.first_inst(ebb).unwrap()); + scope_stack.push(layout.first_inst(block).unwrap()); visible_values.increment_depth(); } - pos.borrow_mut().goto_top(ebb); + pos.borrow_mut().goto_top(block); while let Some(inst) = { let mut pos = pos.borrow_mut(); pos.next_inst() diff --git a/cranelift/codegen/src/simple_preopt.rs b/cranelift/codegen/src/simple_preopt.rs index fb91417131..c5a02caea4 100644 --- a/cranelift/codegen/src/simple_preopt.rs +++ b/cranelift/codegen/src/simple_preopt.rs @@ -14,7 +14,7 @@ use crate::ir::{ immediates, instructions::{Opcode, ValueList}, types::{I16, I32, I64, I8}, - DataFlowGraph, Ebb, Function, Inst, InstBuilder, InstructionData, Type, Value, + Block, DataFlowGraph, Function, Inst, InstBuilder, InstructionData, Type, Value, }; use crate::isa::TargetIsa; use crate::timing; @@ -810,10 +810,10 @@ enum BranchOrderKind { /// Reorder branches to encourage fallthroughs. 
/// -/// When an ebb ends with a conditional branch followed by an unconditional -/// branch, this will reorder them if one of them is branching to the next Ebb +/// When a block ends with a conditional branch followed by an unconditional +/// branch, this will reorder them if one of them is branching to the next Block /// layout-wise. The unconditional jump can then become a fallthrough. -fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst: Inst) { +fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, block: Block, inst: Inst) { let (term_inst, term_inst_args, term_dest, cond_inst, cond_inst_args, cond_dest, kind) = match pos.func.dfg[inst] { InstructionData::Jump { @@ -821,13 +821,13 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst destination, ref args, } => { - let next_ebb = if let Some(next_ebb) = pos.func.layout.next_ebb(ebb) { - next_ebb + let next_block = if let Some(next_block) = pos.func.layout.next_block(block) { + next_block } else { return; }; - if destination == next_ebb { + if destination == next_block { return; } @@ -840,7 +840,7 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst let prev_inst_data = &pos.func.dfg[prev_inst]; if let Some(prev_dest) = prev_inst_data.branch_destination() { - if prev_dest != next_ebb { + if prev_dest != next_block { return; } } else { @@ -941,7 +941,7 @@ fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, ebb: Ebb, inst } } - cfg.recompute_ebb(pos.func, ebb); + cfg.recompute_block(pos.func, block); } /// The main pre-opt pass. @@ -949,7 +949,7 @@ pub fn do_preopt(func: &mut Function, cfg: &mut ControlFlowGraph, isa: &dyn Targ let _tt = timing::preopt(); let mut pos = FuncCursor::new(func); let native_word_width = isa.pointer_bytes(); - while let Some(ebb) = pos.next_ebb() { + while let Some(block) = pos.next_block() { while let Some(inst) = pos.next_inst() { // Apply basic simplifications. simplify(&mut pos, inst, native_word_width as u32); @@ -961,7 +961,7 @@ pub fn do_preopt(func: &mut Function, cfg: &mut ControlFlowGraph, isa: &dyn Targ } branch_opt(&mut pos, inst); - branch_order(&mut pos, cfg, ebb, inst); + branch_order(&mut pos, cfg, block, inst); } } } diff --git a/cranelift/codegen/src/topo_order.rs b/cranelift/codegen/src/topo_order.rs index 647a8da941..8d38e4f324 100644 --- a/cranelift/codegen/src/topo_order.rs +++ b/cranelift/codegen/src/topo_order.rs @@ -1,28 +1,28 @@ -//! Topological order of EBBs, according to the dominator tree. +//! Topological order of blocks, according to the dominator tree. use crate::dominator_tree::DominatorTree; use crate::entity::EntitySet; -use crate::ir::{Ebb, Layout}; +use crate::ir::{Block, Layout}; use alloc::vec::Vec; -/// Present EBBs in a topological order such that all dominating EBBs are guaranteed to be visited -/// before the current EBB. +/// Present blocks in a topological order such that all dominating blocks are guaranteed to be visited +/// before the current block. /// -/// There are many topological orders of the EBBs in a function, so it is possible to provide a -/// preferred order, and the `TopoOrder` will present EBBs in an order that is as close as possible +/// There are many topological orders of the blocks in a function, so it is possible to provide a +/// preferred order, and the `TopoOrder` will present blocks in an order that is as close as possible /// to the preferred order. pub struct TopoOrder { - /// Preferred order of EBBs to visit. 
-    preferred: Vec<Ebb>,
+    /// Preferred order of blocks to visit.
+    preferred: Vec<Block>,
 
     /// Next entry to get from `preferred`.
     next: usize,
 
-    /// Set of visited EBBs.
-    visited: EntitySet<Ebb>,
+    /// Set of visited blocks.
+    visited: EntitySet<Block>,
 
-    /// Stack of EBBs to be visited next, already in `visited`.
-    stack: Vec<Ebb>,
+    /// Stack of blocks to be visited next, already in `visited`.
+    stack: Vec<Block>,
 }
 
 impl TopoOrder {
@@ -44,11 +44,11 @@ impl TopoOrder {
         self.stack.clear();
     }
 
-    /// Reset and initialize with a preferred sequence of EBBs. The resulting topological order is
-    /// guaranteed to contain all of the EBBs in `preferred` as well as any dominators.
-    pub fn reset<Ebbs>(&mut self, preferred: Ebbs)
+    /// Reset and initialize with a preferred sequence of blocks. The resulting topological order is
+    /// guaranteed to contain all of the blocks in `preferred` as well as any dominators.
+    pub fn reset<Blocks>(&mut self, preferred: Blocks)
     where
-        Ebbs: IntoIterator<Item = Ebb>,
+        Blocks: IntoIterator<Item = Block>,
     {
         self.preferred.clear();
         self.preferred.extend(preferred);
@@ -57,27 +57,29 @@ impl TopoOrder {
         self.stack.clear();
     }
 
-    /// Get the next EBB in the topological order.
+    /// Get the next block in the topological order.
     ///
-    /// Two things are guaranteed about the EBBs returned by this function:
+    /// Two things are guaranteed about the blocks returned by this function:
     ///
-    /// - All EBBs in the `preferred` iterator given to `reset` will be returned.
-    /// - All dominators are visited before the EBB returned.
-    pub fn next(&mut self, layout: &Layout, domtree: &DominatorTree) -> Option<Ebb> {
-        self.visited.resize(layout.ebb_capacity());
+    /// - All blocks in the `preferred` iterator given to `reset` will be returned.
+    /// - All dominators are visited before the block returned.
+    pub fn next(&mut self, layout: &Layout, domtree: &DominatorTree) -> Option<Block> {
+        self.visited.resize(layout.block_capacity());
         // Any entries in `stack` should be returned immediately. They have already been added to
         // `visited`.
         while self.stack.is_empty() {
             match self.preferred.get(self.next).cloned() {
                 None => return None,
-                Some(mut ebb) => {
-                    // We have the next EBB in the preferred order.
+                Some(mut block) => {
+                    // We have the next block in the preferred order.
                     self.next += 1;
                     // Push it along with any non-visited dominators.
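An editor's aside on the hunk just below: the `while self.visited.insert(block)` loop walks the immediate-dominator chain and pushes each not-yet-visited block, so that popping the stack later yields dominators first. A minimal stand-alone sketch of the same walk, using plain integer IDs and a hypothetical `push_with_dominators` helper rather than Cranelift's types:

    use std::collections::{HashMap, HashSet};

    // Toy model of the idom-chain walk; the entry block has no idom entry.
    fn push_with_dominators(
        start: u32,
        idom: &HashMap<u32, u32>, // block -> immediate dominator
        visited: &mut HashSet<u32>,
        stack: &mut Vec<u32>,
    ) {
        let mut block = start;
        // Push `start` and every unvisited dominator; popping the stack then
        // yields dominators before the blocks they dominate.
        while visited.insert(block) {
            stack.push(block);
            match idom.get(&block) {
                Some(&d) => block = d,
                None => break, // reached the entry block
            }
        }
    }

    fn main() {
        let idom: HashMap<u32, u32> = [(1, 0), (2, 1)].into_iter().collect();
        let (mut visited, mut stack) = (HashSet::new(), Vec::new());
        push_with_dominators(2, &idom, &mut visited, &mut stack);
        assert_eq!(stack.pop(), Some(0)); // the entry comes out first
    }

Popping after the walk returns the outermost dominator first, which is exactly the property `TopoOrder::next` relies on.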
-                    while self.visited.insert(ebb) {
-                        self.stack.push(ebb);
-                        match domtree.idom(ebb) {
-                            Some(idom) => ebb = layout.inst_ebb(idom).expect("idom not in layout"),
+                    while self.visited.insert(block) {
+                        self.stack.push(block);
+                        match domtree.idom(block) {
+                            Some(idom) => {
+                                block = layout.inst_block(idom).expect("idom not in layout")
+                            }
                             None => break,
                         }
                     }
@@ -105,32 +107,32 @@ mod tests {
         let mut topo = TopoOrder::new();
         assert_eq!(topo.next(&func.layout, &domtree), None);
-        topo.reset(func.layout.ebbs());
+        topo.reset(func.layout.blocks());
         assert_eq!(topo.next(&func.layout, &domtree), None);
     }
 
     #[test]
     fn simple() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        let ebb1 = func.dfg.make_ebb();
+        let block0 = func.dfg.make_block();
+        let block1 = func.dfg.make_block();
 
         {
             let mut cur = FuncCursor::new(&mut func);
-            cur.insert_ebb(ebb0);
-            cur.ins().jump(ebb1, &[]);
-            cur.insert_ebb(ebb1);
-            cur.ins().jump(ebb1, &[]);
+            cur.insert_block(block0);
+            cur.ins().jump(block1, &[]);
+            cur.insert_block(block1);
+            cur.ins().jump(block1, &[]);
         }
 
         let cfg = ControlFlowGraph::with_function(&func);
         let domtree = DominatorTree::with_function(&func, &cfg);
         let mut topo = TopoOrder::new();
-        topo.reset(iter::once(ebb1));
-        assert_eq!(topo.next(&func.layout, &domtree), Some(ebb0));
-        assert_eq!(topo.next(&func.layout, &domtree), Some(ebb1));
+        topo.reset(iter::once(block1));
+        assert_eq!(topo.next(&func.layout, &domtree), Some(block0));
+        assert_eq!(topo.next(&func.layout, &domtree), Some(block1));
         assert_eq!(topo.next(&func.layout, &domtree), None);
     }
 }
diff --git a/cranelift/codegen/src/unreachable_code.rs b/cranelift/codegen/src/unreachable_code.rs
index 95e2955245..63e3e230f8 100644
--- a/cranelift/codegen/src/unreachable_code.rs
+++ b/cranelift/codegen/src/unreachable_code.rs
@@ -9,7 +9,7 @@ use log::debug;
 
 /// Eliminate unreachable code.
 ///
-/// This pass deletes whole EBBs that can't be reached from the entry block. It does not delete
+/// This pass deletes whole blocks that can't be reached from the entry block. It does not delete
 /// individual instructions whose results are unused.
 ///
 /// The reachability analysis is performed by the dominator tree analysis.
@@ -20,27 +20,27 @@ pub fn eliminate_unreachable_code(
 ) {
     let _tt = timing::unreachable_code();
     let mut pos = FuncCursor::new(func);
-    while let Some(ebb) = pos.next_ebb() {
-        if domtree.is_reachable(ebb) {
+    while let Some(block) = pos.next_block() {
+        if domtree.is_reachable(block) {
             continue;
         }
 
-        debug!("Eliminating unreachable {}", ebb);
+        debug!("Eliminating unreachable {}", block);
         // Move the cursor out of the way and make sure the next loop iteration goes to the right
-        // EBB.
-        pos.prev_ebb();
+        // block.
+        pos.prev_block();
 
-        // Remove all instructions from `ebb`.
-        while let Some(inst) = pos.func.layout.first_inst(ebb) {
+        // Remove all instructions from `block`.
+        while let Some(inst) = pos.func.layout.first_inst(block) {
             debug!(" - {}", pos.func.dfg.display_inst(inst, None));
             pos.func.layout.remove_inst(inst);
         }
 
-        // Once the EBB is completely empty, we can update the CFG which removes it from any
+        // Once the block is completely empty, we can update the CFG which removes it from any
         // predecessor lists.
-        cfg.recompute_ebb(pos.func, ebb);
+        cfg.recompute_block(pos.func, block);
 
-        // Finally, remove the EBB from the layout.
+        pos.func.layout.remove_block(block);
     }
 }
diff --git a/cranelift/codegen/src/value_label.rs b/cranelift/codegen/src/value_label.rs
index aab8792536..94e5c58171 100644
--- a/cranelift/codegen/src/value_label.rs
+++ b/cranelift/codegen/src/value_label.rs
@@ -93,8 +93,8 @@ where
 {
     let values_labels = build_value_labels_index::<T>(func);
 
-    let mut ebbs = func.layout.ebbs().collect::<Vec<_>>();
-    ebbs.sort_by_key(|ebb| func.offsets[*ebb]); // Ensure inst offsets always increase
+    let mut blocks = func.layout.blocks().collect::<Vec<_>>();
+    blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
     let encinfo = isa.encoding_info();
     let values_locations = &func.locations;
     let liveness_ranges = regalloc.liveness().ranges();
@@ -117,16 +117,16 @@ where
     let mut end_offset = 0;
     let mut tracked_values: Vec<(Value, ValueLabel, u32, ValueLoc)> = Vec::new();
     let mut divert = RegDiversions::new();
-    for ebb in ebbs {
-        divert.at_ebb(&func.entry_diversions, ebb);
+    for block in blocks {
+        divert.at_block(&func.entry_diversions, block);
         let mut last_srcloc: Option<SourceLoc> = None;
-        for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) {
+        for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
            divert.apply(&func.dfg[inst]);
             end_offset = offset + size;
             // Remove killed values.
             tracked_values.retain(|(x, label, start_offset, last_loc)| {
                 let range = liveness_ranges.get(*x);
-                if range.expect("value").killed_at(inst, ebb, &func.layout) {
+                if range.expect("value").killed_at(inst, block, &func.layout) {
                     add_range(*label, (*start_offset, end_offset), *last_loc);
                     return false;
                 }
@@ -173,7 +173,7 @@ where
                 // Ignore dead/inactive Values.
                 let range = liveness_ranges.get(*v);
                 match range {
-                    Some(r) => r.reaches_use(inst, ebb, &func.layout),
+                    Some(r) => r.reaches_use(inst, block, &func.layout),
                     None => false,
                 }
             });
diff --git a/cranelift/codegen/src/verifier/cssa.rs b/cranelift/codegen/src/verifier/cssa.rs
index 54e88dccf3..f1ff72597a 100644
--- a/cranelift/codegen/src/verifier/cssa.rs
+++ b/cranelift/codegen/src/verifier/cssa.rs
@@ -2,7 +2,7 @@
 
 use crate::dbg::DisplayList;
 use crate::dominator_tree::{DominatorTree, DominatorTreePreorder};
-use crate::flowgraph::{BasicBlock, ControlFlowGraph};
+use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
 use crate::ir::{ExpandedProgramPoint, Function};
 use crate::regalloc::liveness::Liveness;
 use crate::regalloc::virtregs::VirtRegs;
@@ -13,7 +13,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
 ///
 /// Conventional SSA form is represented in Cranelift with the help of virtual registers:
 ///
-/// - Two values are said to be *PHI-related* if one is an EBB argument and the other is passed as
+/// - Two values are said to be *PHI-related* if one is a block argument and the other is passed as
 ///   a branch argument in a location that matches the first value.
 /// - PHI-related values must belong to the same virtual register.
 /// - Two values in the same virtual register must not have overlapping live ranges.
@@ -76,10 +76,10 @@ impl<'a> CssaVerifier<'a> {
                 // Check topological ordering with the previous values in the virtual register.
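The *PHI-related* rule quoted in the module comment above is positional: the i-th argument of a branch pairs with the i-th parameter of the destination block. A toy sketch from the editor (integer stand-ins, not the verifier's real types):

    // Pair the i-th branch argument with the i-th block parameter; every
    // such pair is "PHI-related" and must share a virtual register.
    fn phi_related_pairs(branch_args: &[u32], block_params: &[u32]) -> Vec<(u32, u32)> {
        assert_eq!(branch_args.len(), block_params.len(), "Wrong arguments on branch.");
        branch_args
            .iter()
            .copied()
            .zip(block_params.iter().copied())
            .collect()
    }

    fn main() {
        // jump block1(v10, v11) into block1(v1: i32, v2: i32)
        assert_eq!(phi_related_pairs(&[10, 11], &[1, 2]), vec![(10, 1), (11, 2)]);
    }

The `check_cssa` method further down enforces exactly this pairing through `virtregs.same_class`.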
                let def: ExpandedProgramPoint = self.func.dfg.value_def(val).into();
-                let def_ebb = self.func.layout.pp_ebb(def);
+                let def_block = self.func.layout.pp_block(def);
                 for &prev_val in &values[0..idx] {
                     let prev_def: ExpandedProgramPoint = self.func.dfg.value_def(prev_val).into();
-                    let prev_ebb = self.func.layout.pp_ebb(prev_def);
+                    let prev_block = self.func.layout.pp_block(prev_def);
 
                     if prev_def == def {
                         return errors.fatal((
@@ -95,7 +95,7 @@ impl<'a> CssaVerifier<'a> {
                     }
 
                     // Enforce topological ordering of defs in the virtual register.
-                    if self.preorder.dominates(def_ebb, prev_ebb)
+                    if self.preorder.dominates(def_block, prev_block)
                         && self.domtree.dominates(def, prev_def, &self.func.layout)
                     {
                         return errors.fatal((
@@ -115,12 +115,12 @@ impl<'a> CssaVerifier<'a> {
                 // We only have to check against the nearest dominating value.
                 for &prev_val in values[0..idx].iter().rev() {
                     let prev_def: ExpandedProgramPoint = self.func.dfg.value_def(prev_val).into();
-                    let prev_ebb = self.func.layout.pp_ebb(prev_def);
+                    let prev_block = self.func.layout.pp_block(prev_def);
 
-                    if self.preorder.dominates(prev_ebb, def_ebb)
+                    if self.preorder.dominates(prev_block, def_block)
                         && self.domtree.dominates(prev_def, def, &self.func.layout)
                     {
-                        if self.liveness[prev_val].overlaps_def(def, def_ebb, &self.func.layout) {
+                        if self.liveness[prev_val].overlaps_def(def, def_block, &self.func.layout) {
                             return errors.fatal((
                                 val,
                                 format!(
@@ -142,24 +142,24 @@ impl<'a> CssaVerifier<'a> {
     }
 
     fn check_cssa(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
-        for ebb in self.func.layout.ebbs() {
-            let ebb_params = self.func.dfg.ebb_params(ebb);
-            for BasicBlock { inst: pred, .. } in self.cfg.pred_iter(ebb) {
+        for block in self.func.layout.blocks() {
+            let block_params = self.func.dfg.block_params(block);
+            for BlockPredecessor { inst: pred, .. } in self.cfg.pred_iter(block) {
                 let pred_args = self.func.dfg.inst_variable_args(pred);
 
                 // This should have been caught by an earlier verifier pass.
                 assert_eq!(
-                    ebb_params.len(),
+                    block_params.len(),
                     pred_args.len(),
                     "Wrong arguments on branch."
                 );
 
-                for (&ebb_param, &pred_arg) in ebb_params.iter().zip(pred_args) {
-                    if !self.virtregs.same_class(ebb_param, pred_arg) {
+                for (&block_param, &pred_arg) in block_params.iter().zip(pred_args) {
+                    if !self.virtregs.same_class(block_param, pred_arg) {
                         return errors.fatal((
                             pred,
                             format!(
                                 "{} and {} must be in the same virtual register",
-                                ebb_param, pred_arg
+                                block_param, pred_arg
                             ),
                         ));
                     }
diff --git a/cranelift/codegen/src/verifier/flags.rs b/cranelift/codegen/src/verifier/flags.rs
index 1748fccf71..1a20303d20 100644
--- a/cranelift/codegen/src/verifier/flags.rs
+++ b/cranelift/codegen/src/verifier/flags.rs
@@ -1,7 +1,7 @@
 //! Verify CPU flags values.
 
 use crate::entity::{EntitySet, SecondaryMap};
-use crate::flowgraph::{BasicBlock, ControlFlowGraph};
+use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
 use crate::ir;
 use crate::ir::instructions::BranchInfo;
 use crate::isa;
@@ -42,33 +42,33 @@ struct FlagsVerifier<'a> {
     cfg: &'a ControlFlowGraph,
     encinfo: Option<isa::EncInfo>,
 
-    /// The single live-in flags value (if any) for each EBB.
-    livein: SecondaryMap<ir::Ebb, PackedOption<ir::Value>>,
+    /// The single live-in flags value (if any) for each block.
+    livein: SecondaryMap<ir::Block, PackedOption<ir::Value>>,
 }
 
 impl<'a> FlagsVerifier<'a> {
     fn check(&mut self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
-        // List of EBBs that need to be processed. EBBs may be re-added to this list when we detect
+        // List of blocks that need to be processed.
Blocks may be re-added to this list when we detect
         // that one of their successor blocks needs a live-in flags value.
-        let mut worklist = EntitySet::with_capacity(self.func.layout.ebb_capacity());
-        for ebb in self.func.layout.ebbs() {
-            worklist.insert(ebb);
+        let mut worklist = EntitySet::with_capacity(self.func.layout.block_capacity());
+        for block in self.func.layout.blocks() {
+            worklist.insert(block);
         }
 
-        while let Some(ebb) = worklist.pop() {
-            if let Some(value) = self.visit_ebb(ebb, errors)? {
-                // The EBB has live-in flags. Check if the value changed.
-                match self.livein[ebb].expand() {
-                    // Revisit any predecessor blocks the first time we see a live-in for `ebb`.
+        while let Some(block) = worklist.pop() {
+            if let Some(value) = self.visit_block(block, errors)? {
+                // The block has live-in flags. Check if the value changed.
+                match self.livein[block].expand() {
+                    // Revisit any predecessor blocks the first time we see a live-in for `block`.
                     None => {
-                        self.livein[ebb] = value.into();
-                        for BasicBlock { ebb: pred, .. } in self.cfg.pred_iter(ebb) {
+                        self.livein[block] = value.into();
+                        for BlockPredecessor { block: pred, .. } in self.cfg.pred_iter(block) {
                             worklist.insert(pred);
                         }
                     }
                     Some(old) if old != value => {
                         return errors.fatal((
-                            ebb,
+                            block,
                             format!("conflicting live-in CPU flags: {} and {}", old, value),
                         ));
                     }
@@ -76,24 +76,24 @@ impl<'a> FlagsVerifier<'a> {
                 }
             } else {
                 // Existing live-in flags should never be able to disappear.
-                assert_eq!(self.livein[ebb].expand(), None);
+                assert_eq!(self.livein[block].expand(), None);
             }
         }
         Ok(())
     }
 
-    /// Check flags usage in `ebb` and return the live-in flags value, if any.
-    fn visit_ebb(
+    /// Check flags usage in `block` and return the live-in flags value, if any.
+    fn visit_block(
         &self,
-        ebb: ir::Ebb,
+        block: ir::Block,
         errors: &mut VerifierErrors,
     ) -> VerifierStepResult<Option<ir::Value>> {
         // The single currently live flags value.
         let mut live_val = None;
 
         // Visit instructions backwards so we can track liveness accurately.
-        for inst in self.func.layout.ebb_insts(ebb).rev() {
+        for inst in self.func.layout.block_insts(block).rev() {
             // Check if `inst` interferes with existing live flags.
             if let Some(live) = live_val {
                 for &res in self.func.dfg.inst_results(inst) {
@@ -130,7 +130,7 @@ impl<'a> FlagsVerifier<'a> {
                 }
             }
 
-            // Include live-in flags to successor EBBs.
+            // Include live-in flags to successor blocks.
             match self.func.dfg.analyze_branch(inst) {
                 BranchInfo::NotABranch => {}
                 BranchInfo::SingleDest(dest, _) => {
diff --git a/cranelift/codegen/src/verifier/liveness.rs b/cranelift/codegen/src/verifier/liveness.rs
index 4c4940f356..921babc6a0 100644
--- a/cranelift/codegen/src/verifier/liveness.rs
+++ b/cranelift/codegen/src/verifier/liveness.rs
@@ -1,6 +1,6 @@
 //! Liveness verifier.
 
-use crate::flowgraph::{BasicBlock, ControlFlowGraph};
+use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
 use crate::ir::entities::AnyEntity;
 use crate::ir::{ExpandedProgramPoint, Function, ProgramPoint, Value};
 use crate::isa::TargetIsa;
@@ -16,7 +16,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
 /// - All values in the program must have a live range.
 /// - The live range def point must match where the value is defined.
 /// - The live range must reach all uses.
-/// - When a live range is live-in to an EBB, it must be live at all the predecessors.
+/// - When a live range is live-in to a block, it must be live at all the predecessors.
 /// - The live range affinity must be compatible with encoding constraints.
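Of the rules in this list, the live-in requirement is the subtle one; see the loop over `lr.liveins()` below. As a hedged, self-contained model from the editor (toy integer IDs and a hypothetical `verify_livein` helper; the real verifier works on `LiveRange`s and the CFG instead):

    use std::collections::{HashMap, HashSet};

    // Toy check of the live-in rule: a value live-in to a block must be live
    // at the end of every predecessor block.
    fn verify_livein(
        preds: &HashMap<u32, Vec<u32>>,        // block -> predecessor blocks
        live_out: &HashMap<u32, HashSet<u32>>, // block -> values live at its terminator
        live_in: &HashMap<u32, HashSet<u32>>,  // block -> values live at its head
    ) -> Result<(), String> {
        for (&block, values) in live_in {
            for &val in values {
                for &pred in preds.get(&block).into_iter().flatten() {
                    if !live_out.get(&pred).map_or(false, |s| s.contains(&val)) {
                        return Err(format!(
                            "v{} is live in to block{} but not live at predecessor block{}",
                            val, block, pred
                        ));
                    }
                }
            }
        }
        Ok(())
    }

    fn main() {
        let preds = HashMap::from([(1u32, vec![0u32])]);
        let live_out = HashMap::from([(0u32, HashSet::from([7u32]))]);
        let live_in = HashMap::from([(1u32, HashSet::from([7u32]))]);
        assert!(verify_livein(&preds, &live_out, &live_in).is_ok());
    }

The error string mirrors the one the real verifier emits further down.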
/// /// We don't verify that live ranges are minimal. This would require recomputing live ranges for @@ -35,7 +35,7 @@ pub fn verify_liveness( cfg, liveness, }; - verifier.check_ebbs(errors)?; + verifier.check_blocks(errors)?; verifier.check_insts(errors)?; Ok(()) } @@ -48,17 +48,18 @@ struct LivenessVerifier<'a> { } impl<'a> LivenessVerifier<'a> { - /// Check all EBB arguments. - fn check_ebbs(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> { - for ebb in self.func.layout.ebbs() { - for &val in self.func.dfg.ebb_params(ebb) { + /// Check all block arguments. + fn check_blocks(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> { + for block in self.func.layout.blocks() { + for &val in self.func.dfg.block_params(block) { let lr = match self.liveness.get(val) { Some(lr) => lr, None => { - return errors.fatal((ebb, format!("EBB arg {} has no live range", val))) + return errors + .fatal((block, format!("block arg {} has no live range", val))) } }; - self.check_lr(ebb.into(), val, lr, errors)?; + self.check_lr(block.into(), val, lr, errors)?; } } Ok(()) @@ -66,8 +67,8 @@ impl<'a> LivenessVerifier<'a> { /// Check all instructions. fn check_insts(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> { - for ebb in self.func.layout.ebbs() { - for inst in self.func.layout.ebb_insts(ebb) { + for block in self.func.layout.blocks() { + for inst in self.func.layout.block_insts(block) { let encoding = self.func.encodings[inst]; // Check the defs. @@ -110,8 +111,8 @@ impl<'a> LivenessVerifier<'a> { None => return errors.fatal((inst, format!("{} has no live range", val))), }; - debug_assert!(self.func.layout.inst_ebb(inst).unwrap() == ebb); - if !lr.reaches_use(inst, ebb, &self.func.layout) { + debug_assert!(self.func.layout.inst_block(inst).unwrap() == block); + if !lr.reaches_use(inst, block, &self.func.layout) { return errors.fatal((inst, format!("{} is not live at this use", val))); } @@ -143,7 +144,7 @@ impl<'a> LivenessVerifier<'a> { let l = &self.func.layout; let loc: AnyEntity = match def.into() { - ExpandedProgramPoint::Ebb(e) => e.into(), + ExpandedProgramPoint::Block(e) => e.into(), ExpandedProgramPoint::Inst(i) => i.into(), }; if lr.def() != def { @@ -159,66 +160,70 @@ impl<'a> LivenessVerifier<'a> { return Ok(()); } } - let def_ebb = match def.into() { - ExpandedProgramPoint::Ebb(e) => e, - ExpandedProgramPoint::Inst(i) => l.inst_ebb(i).unwrap(), + let def_block = match def.into() { + ExpandedProgramPoint::Block(e) => e, + ExpandedProgramPoint::Inst(i) => l.inst_block(i).unwrap(), }; match lr.def_local_end().into() { - ExpandedProgramPoint::Ebb(e) => { + ExpandedProgramPoint::Block(e) => { return errors.fatal(( loc, format!("Def local range for {} can't end at {}", val, e), )); } ExpandedProgramPoint::Inst(i) => { - if self.func.layout.inst_ebb(i) != Some(def_ebb) { - return errors.fatal((loc, format!("Def local end for {} in wrong ebb", val))); + if self.func.layout.inst_block(i) != Some(def_block) { + return errors + .fatal((loc, format!("Def local end for {} in wrong block", val))); } } } // Now check the live-in intervals against the CFG. 
-        for (mut ebb, end) in lr.liveins() {
-            if !l.is_ebb_inserted(ebb) {
+        for (mut block, end) in lr.liveins() {
+            if !l.is_block_inserted(block) {
                 return errors.fatal((
                     loc,
-                    format!("{} livein at {} which is not in the layout", val, ebb),
+                    format!("{} livein at {} which is not in the layout", val, block),
                 ));
             }
-            let end_ebb = match l.inst_ebb(end) {
+            let end_block = match l.inst_block(end) {
                 Some(e) => e,
                 None => {
                     return errors.fatal((
                         loc,
                         format!(
                             "{} livein for {} ends at {} which is not in the layout",
-                            val, ebb, end
+                            val, block, end
                         ),
                     ));
                 }
             };
 
-            // Check all the EBBs in the interval independently.
+            // Check all the blocks in the interval independently.
             loop {
-                // If `val` is live-in at `ebb`, it must be live at all the predecessors.
-                for BasicBlock { inst: pred, ebb } in self.cfg.pred_iter(ebb) {
-                    if !lr.reaches_use(pred, ebb, &self.func.layout) {
+                // If `val` is live-in at `block`, it must be live at all the predecessors.
+                for BlockPredecessor { inst: pred, block } in self.cfg.pred_iter(block) {
+                    if !lr.reaches_use(pred, block, &self.func.layout) {
                         return errors.fatal((
                             pred,
-                            format!("{} is live in to {} but not live at predecessor", val, ebb),
+                            format!(
+                                "{} is live in to {} but not live at predecessor",
+                                val, block
+                            ),
                         ));
                     }
                 }
 
-                if ebb == end_ebb {
+                if block == end_block {
                     break;
                 }
-                ebb = match l.next_ebb(ebb) {
+                block = match l.next_block(block) {
                     Some(e) => e,
                     None => {
                         return errors.fatal((
                             loc,
-                            format!("end of {} livein ({}) never reached", val, end_ebb),
+                            format!("end of {} livein ({}) never reached", val, end_block),
                         ));
                     }
                 };
diff --git a/cranelift/codegen/src/verifier/locations.rs b/cranelift/codegen/src/verifier/locations.rs
index fe180b8e81..287413a412 100644
--- a/cranelift/codegen/src/verifier/locations.rs
+++ b/cranelift/codegen/src/verifier/locations.rs
@@ -15,7 +15,7 @@ use crate::verifier::{VerifierErrors, VerifierStepResult};
 /// instruction encoding recipes.
 ///
 /// Values can be temporarily diverted to a different location by using the `regmove`, `regspill`,
-/// and `regfill` instructions, but only inside an EBB.
+/// and `regfill` instructions, but only inside a block.
 ///
 /// If a liveness analysis is provided, it is used to verify that there are no active register
 /// diversions across control flow edges.
@@ -54,11 +54,11 @@ impl<'a> LocationVerifier<'a> {
         let dfg = &self.func.dfg;
         let mut divert = RegDiversions::new();
 
-        for ebb in self.func.layout.ebbs() {
-            divert.at_ebb(&self.func.entry_diversions, ebb);
+        for block in self.func.layout.blocks() {
+            divert.at_block(&self.func.entry_diversions, block);
             let mut is_after_branch = false;
 
-            for inst in self.func.layout.ebb_insts(ebb) {
+            for inst in self.func.layout.block_insts(block) {
                 let enc = self.func.encodings[inst];
 
                 if enc.is_legal() {
@@ -332,24 +332,24 @@ impl<'a> LocationVerifier<'a> {
                 "No branch information for {}",
                 dfg.display_inst(inst, self.isa)
             ),
-            SingleDest(ebb, _) => {
-                let unique_predecessor = self.cfg.pred_iter(ebb).count() == 1;
+            SingleDest(block, _) => {
+                let unique_predecessor = self.cfg.pred_iter(block).count() == 1;
                 let mut val_to_remove = vec![];
                 for (&value, d) in divert.iter() {
                     let lr = &liveness[value];
                     if is_after_branch && unique_predecessor {
                         // Forward diversions based on the targeted branch.
-                        if !lr.is_livein(ebb, &self.func.layout) {
+                        if !lr.is_livein(block, &self.func.layout) {
                             val_to_remove.push(value)
                         }
-                    } else if lr.is_livein(ebb, &self.func.layout) {
+                    } else if lr.is_livein(block, &self.func.layout) {
                         return errors.fatal((
                             inst,
                             format!(
                                 "SingleDest: {} is diverted to {} and live in to {}",
                                 value,
                                 d.to.display(&self.reginfo),
-                                ebb,
+                                block,
                             ),
                         ));
                     }
@@ -358,34 +358,34 @@ impl<'a> LocationVerifier<'a> {
                 for val in val_to_remove.into_iter() {
                     divert.remove(val);
                 }
-                debug_assert!(divert.check_ebb_entry(&self.func.entry_diversions, ebb));
+                debug_assert!(divert.check_block_entry(&self.func.entry_diversions, block));
             }
         }
-        Table(jt, ebb) => {
+        Table(jt, block) => {
             for (&value, d) in divert.iter() {
                 let lr = &liveness[value];
-                if let Some(ebb) = ebb {
-                    if lr.is_livein(ebb, &self.func.layout) {
+                if let Some(block) = block {
+                    if lr.is_livein(block, &self.func.layout) {
                         return errors.fatal((
                             inst,
                             format!(
                                 "Table.default: {} is diverted to {} and live in to {}",
                                 value,
                                 d.to.display(&self.reginfo),
-                                ebb,
+                                block,
                             ),
                         ));
                     }
                 }
-                for ebb in self.func.jump_tables[jt].iter() {
-                    if lr.is_livein(*ebb, &self.func.layout) {
+                for block in self.func.jump_tables[jt].iter() {
+                    if lr.is_livein(*block, &self.func.layout) {
                         return errors.fatal((
                             inst,
                             format!(
                                 "Table.case: {} is diverted to {} and live in to {}",
                                 value,
                                 d.to.display(&self.reginfo),
-                                ebb,
+                                block,
                             ),
                         ));
                     }
diff --git a/cranelift/codegen/src/verifier/mod.rs b/cranelift/codegen/src/verifier/mod.rs
index 9833a8c0f5..58dbe259c8 100644
--- a/cranelift/codegen/src/verifier/mod.rs
+++ b/cranelift/codegen/src/verifier/mod.rs
@@ -1,40 +1,40 @@
 //! A verifier for ensuring that functions are well formed.
 //! It verifies:
 //!
-//! EBB integrity
+//! Block integrity
 //!
-//! - All instructions reached from the `ebb_insts` iterator must belong to
-//!   the EBB as reported by `inst_ebb()`.
-//! - Every EBB must end in a terminator instruction, and no other instruction
+//! - All instructions reached from the `block_insts` iterator must belong to
+//!   the block as reported by `inst_block()`.
+//! - Every block must end in a terminator instruction, and no other instruction
 //!   can be a terminator.
-//! - Every value in the `ebb_params` iterator belongs to the EBB as reported by `value_ebb`.
+//! - Every value in the `block_params` iterator belongs to the block as reported by `value_block`.
 //!
 //! Instruction integrity
 //!
 //! - The instruction format must match the opcode.
 //! - All result values must be created for multi-valued instructions.
-//! - All referenced entities must exist. (Values, EBBs, stack slots, ...)
+//! - All referenced entities must exist. (Values, blocks, stack slots, ...)
 //! - Instructions must not reference (eg. branch to) the entry block.
 //!
 //! SSA form
 //!
 //! - Values must be defined by an instruction that exists and that is inserted in
-//!   an EBB, or be an argument of an existing EBB.
+//!   a block, or be an argument of an existing block.
 //! - Values used by an instruction must dominate the instruction.
 //!
 //! Control flow graph and dominator tree integrity:
 //!
-//! - All predecessors in the CFG must be branches to the EBB.
-//! - All branches to an EBB must be present in the CFG.
+//! - All predecessors in the CFG must be branches to the block.
+//! - All branches to a block must be present in the CFG.
 //! - A recomputed dominator tree is identical to the existing one.
 //!
 //! Type checking
 //!
 //! - Compare input and output values against the opcode's type constraints.
 //!
For polymorphic opcodes, determine the controlling type variable first. -//! - Branches and jumps must pass arguments to destination EBBs that match the +//! - Branches and jumps must pass arguments to destination blocks that match the //! expected types exactly. The number of arguments must match. -//! - All EBBs in a jump table must take no arguments. +//! - All blocks in a jump table must take no arguments. //! - Function calls are type checked against their signature. //! - The entry block must take arguments that match the signature of the current //! function. @@ -60,12 +60,12 @@ use self::flags::verify_flags; use crate::dbg::DisplayList; use crate::dominator_tree::DominatorTree; use crate::entity::SparseSet; -use crate::flowgraph::{BasicBlock, ControlFlowGraph}; +use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; use crate::ir; use crate::ir::entities::AnyEntity; use crate::ir::instructions::{BranchInfo, CallInfo, InstructionFormat, ResolvedConstraint}; use crate::ir::{ - types, ArgumentLoc, Ebb, FuncRef, Function, GlobalValue, Inst, InstructionData, JumpTable, + types, ArgumentLoc, Block, FuncRef, Function, GlobalValue, Inst, InstructionData, JumpTable, Opcode, SigRef, StackSlot, StackSlotKind, Type, Value, ValueDef, ValueList, ValueLoc, }; use crate::isa::TargetIsa; @@ -495,30 +495,30 @@ impl<'a> Verifier<'a> { fn verify_jump_tables(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> { for (jt, jt_data) in &self.func.jump_tables { - for &ebb in jt_data.iter() { - self.verify_ebb(jt, ebb, errors)?; + for &block in jt_data.iter() { + self.verify_block(jt, block, errors)?; } } Ok(()) } - /// Check that the given EBB can be encoded as a BB, by checking that only - /// branching instructions are ending the EBB. - fn encodable_as_bb(&self, ebb: Ebb, errors: &mut VerifierErrors) -> VerifierStepResult<()> { - match self.func.is_ebb_basic(ebb) { + /// Check that the given block can be encoded as a BB, by checking that only + /// branching instructions are ending the block. + fn encodable_as_bb(&self, block: Block, errors: &mut VerifierErrors) -> VerifierStepResult<()> { + match self.func.is_block_basic(block) { Ok(()) => Ok(()), Err((inst, message)) => errors.fatal((inst, self.context(inst), message)), } } - fn ebb_integrity( + fn block_integrity( &self, - ebb: Ebb, + block: Block, inst: Inst, errors: &mut VerifierErrors, ) -> VerifierStepResult<()> { let is_terminator = self.func.dfg[inst].opcode().is_terminator(); - let is_last_inst = self.func.layout.last_inst(ebb) == Some(inst); + let is_last_inst = self.func.layout.last_inst(block) == Some(inst); if is_terminator && !is_last_inst { // Terminating instructions only occur at the end of blocks. @@ -527,30 +527,30 @@ impl<'a> Verifier<'a> { self.context(inst), format!( "a terminator instruction was encountered before the end of {}", - ebb + block ), )); } if is_last_inst && !is_terminator { - return errors.fatal((ebb, "block does not end in a terminator instruction")); + return errors.fatal((block, "block does not end in a terminator instruction")); } - // Instructions belong to the correct ebb. - let inst_ebb = self.func.layout.inst_ebb(inst); - if inst_ebb != Some(ebb) { + // Instructions belong to the correct block. + let inst_block = self.func.layout.inst_block(inst); + if inst_block != Some(block) { return errors.fatal(( inst, self.context(inst), - format!("should belong to {} not {:?}", ebb, inst_ebb), + format!("should belong to {} not {:?}", block, inst_block), )); } - // Parameters belong to the correct ebb. 
-        for &arg in self.func.dfg.ebb_params(ebb) {
+        // Parameters belong to the correct block.
+        for &arg in self.func.dfg.block_params(block) {
             match self.func.dfg.value_def(arg) {
-                ValueDef::Param(arg_ebb, _) => {
-                    if ebb != arg_ebb {
-                        return errors.fatal((arg, format!("does not belong to {}", ebb)));
+                ValueDef::Param(arg_block, _) => {
+                    if block != arg_block {
+                        return errors.fatal((arg, format!("does not belong to {}", block)));
                     }
                 }
                 _ => {
@@ -656,13 +656,13 @@ impl<'a> Verifier<'a> {
                 ref args,
                 ..
             } => {
-                self.verify_ebb(inst, destination, errors)?;
+                self.verify_block(inst, destination, errors)?;
                 self.verify_value_list(inst, args, errors)?;
             }
             BranchTable {
                 table, destination, ..
             } => {
-                self.verify_ebb(inst, destination, errors)?;
+                self.verify_block(inst, destination, errors)?;
                 self.verify_jump_table(inst, table, errors)?;
             }
             BranchTableBase { table, .. }
@@ -775,18 +775,18 @@ impl<'a> Verifier<'a> {
         Ok(())
     }
 
-    fn verify_ebb(
+    fn verify_block(
         &self,
         loc: impl Into<AnyEntity>,
-        e: Ebb,
+        e: Block,
         errors: &mut VerifierErrors,
     ) -> VerifierStepResult<()> {
-        if !self.func.dfg.ebb_is_valid(e) || !self.func.layout.is_ebb_inserted(e) {
-            return errors.fatal((loc, format!("invalid ebb reference {}", e)));
+        if !self.func.dfg.block_is_valid(e) || !self.func.layout.is_block_inserted(e) {
+            return errors.fatal((loc, format!("invalid block reference {}", e)));
         }
         if let Some(entry_block) = self.func.layout.entry_block() {
             if e == entry_block {
-                return errors.fatal((loc, format!("invalid reference to entry ebb {}", e)));
+                return errors.fatal((loc, format!("invalid reference to entry block {}", e)));
             }
         }
         Ok(())
@@ -947,8 +947,8 @@ impl<'a> Verifier<'a> {
         self.verify_value(loc_inst, v, errors)?;
 
         let dfg = &self.func.dfg;
-        let loc_ebb = self.func.layout.pp_ebb(loc_inst);
-        let is_reachable = self.expected_domtree.is_reachable(loc_ebb);
+        let loc_block = self.func.layout.pp_block(loc_inst);
+        let is_reachable = self.expected_domtree.is_reachable(loc_block);
 
         // SSA form
         match dfg.value_def(v) {
@@ -961,12 +961,12 @@ impl<'a> Verifier<'a> {
                         format!("{} is defined by invalid instruction {}", v, def_inst),
                     ));
                 }
-                // Defining instruction is inserted in an EBB.
-                if self.func.layout.inst_ebb(def_inst) == None {
+                // Defining instruction is inserted in a block.
+                if self.func.layout.inst_block(def_inst) == None {
                     return errors.fatal((
                         loc_inst,
                         self.context(loc_inst),
-                        format!("{} is defined by {} which has no EBB", v, def_inst),
+                        format!("{} is defined by {} which has no block", v, def_inst),
                     ));
                 }
                 // Defining instruction dominates the instruction that uses the value.
@@ -990,33 +990,33 @@ impl<'a> Verifier<'a> {
                     }
                 }
             }
-            ValueDef::Param(ebb, _) => {
-                // Value is defined by an existing EBB.
-                if !dfg.ebb_is_valid(ebb) {
+            ValueDef::Param(block, _) => {
+                // Value is defined by an existing block.
+                if !dfg.block_is_valid(block) {
                     return errors.fatal((
                         loc_inst,
                         self.context(loc_inst),
-                        format!("{} is defined by invalid EBB {}", v, ebb),
+                        format!("{} is defined by invalid block {}", v, block),
                     ));
                 }
-                // Defining EBB is inserted in the layout
-                if !self.func.layout.is_ebb_inserted(ebb) {
+                // Defining block is inserted in the layout
+                if !self.func.layout.is_block_inserted(block) {
                     return errors.fatal((
                         loc_inst,
                         self.context(loc_inst),
-                        format!("{} is defined by {} which is not in the layout", v, ebb),
+                        format!("{} is defined by {} which is not in the layout", v, block),
                     ));
                 }
-                // The defining EBB dominates the instruction using this value.
+                // The defining block dominates the instruction using this value.
                 if is_reachable
                     && !self
                         .expected_domtree
-                        .dominates(ebb, loc_inst, &self.func.layout)
+                        .dominates(block, loc_inst, &self.func.layout)
                 {
                     return errors.fatal((
                         loc_inst,
                         self.context(loc_inst),
-                        format!("uses value arg from non-dominating {}", ebb),
+                        format!("uses value arg from non-dominating {}", block),
                     ));
                 }
             }
@@ -1081,17 +1081,17 @@ impl<'a> Verifier<'a> {
         errors: &mut VerifierErrors,
     ) -> VerifierStepResult<()> {
         // We consider two `DominatorTree`s to be equal if they return the same immediate
-        // dominator for each EBB. Therefore the current domtree is valid if it matches the freshly
+        // dominator for each block. Therefore the current domtree is valid if it matches the freshly
         // computed one.
-        for ebb in self.func.layout.ebbs() {
-            let expected = self.expected_domtree.idom(ebb);
-            let got = domtree.idom(ebb);
+        for block in self.func.layout.blocks() {
+            let expected = self.expected_domtree.idom(block);
+            let got = domtree.idom(block);
             if got != expected {
                 return errors.fatal((
-                    ebb,
+                    block,
                     format!(
                         "invalid domtree, expected idom({}) = {:?}, got {:?}",
-                        ebb, expected, got
+                        block, expected, got
                     ),
                 ));
             }
@@ -1100,37 +1100,37 @@ impl<'a> Verifier<'a> {
         if domtree.cfg_postorder().len() != self.expected_domtree.cfg_postorder().len() {
             return errors.fatal((
                 AnyEntity::Function,
-                "incorrect number of Ebbs in postorder traversal",
+                "incorrect number of blocks in postorder traversal",
             ));
         }
 
-        for (index, (&test_ebb, &true_ebb)) in domtree
+        for (index, (&test_block, &true_block)) in domtree
             .cfg_postorder()
             .iter()
             .zip(self.expected_domtree.cfg_postorder().iter())
             .enumerate()
         {
-            if test_ebb != true_ebb {
+            if test_block != true_block {
                 return errors.fatal((
-                    test_ebb,
+                    test_block,
                     format!(
-                        "invalid domtree, postorder ebb number {} should be {}, got {}",
-                        index, true_ebb, test_ebb
+                        "invalid domtree, postorder block number {} should be {}, got {}",
+                        index, true_block, test_block
                     ),
                 ));
             }
         }
 
-        // We verify rpo_cmp on pairs of adjacent ebbs in the postorder
-        for (&prev_ebb, &next_ebb) in domtree.cfg_postorder().iter().adjacent_pairs() {
+        // We verify rpo_cmp on pairs of adjacent blocks in the postorder
+        for (&prev_block, &next_block) in domtree.cfg_postorder().iter().adjacent_pairs() {
             if self
                 .expected_domtree
-                .rpo_cmp(prev_ebb, next_ebb, &self.func.layout)
+                .rpo_cmp(prev_block, next_block, &self.func.layout)
                 != Ordering::Greater
             {
                 return errors.fatal((
-                    next_ebb,
+                    next_block,
                     format!(
                         "invalid domtree, rpo_cmp does not say {} is greater than {}",
-                        prev_ebb, next_ebb
+                        prev_block, next_block
                     ),
                 ));
             }
@@ -1139,26 +1139,26 @@ impl<'a> Verifier<'a> {
     }
 
     fn typecheck_entry_block_params(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
-        if let Some(ebb) = self.func.layout.entry_block() {
+        if let Some(block) = self.func.layout.entry_block() {
             let expected_types = &self.func.signature.params;
-            let ebb_param_count = self.func.dfg.num_ebb_params(ebb);
+            let block_param_count = self.func.dfg.num_block_params(block);
 
-            if ebb_param_count != expected_types.len() {
+            if block_param_count != expected_types.len() {
                 return errors.fatal((
-                    ebb,
+                    block,
                     format!(
                         "entry block parameters ({}) must match function signature ({})",
-                        ebb_param_count,
+                        block_param_count,
                         expected_types.len()
                     ),
                 ));
             }
 
-            for (i, &arg) in self.func.dfg.ebb_params(ebb).iter().enumerate() {
+            for (i, &arg) in self.func.dfg.block_params(block).iter().enumerate() {
                 let arg_type = self.func.dfg.value_type(arg);
                 if arg_type
!= expected_types[i].value_type {
                     errors.report((
-                        ebb,
+                        block,
                         format!(
                             "entry block parameter {} expected to have type {}, got {}",
                             i, expected_types[i], arg_type
@@ -1295,38 +1295,38 @@ impl<'a> Verifier<'a> {
         errors: &mut VerifierErrors,
     ) -> VerifierStepResult<()> {
         match self.func.dfg.analyze_branch(inst) {
-            BranchInfo::SingleDest(ebb, _) => {
+            BranchInfo::SingleDest(block, _) => {
                 let iter = self
                     .func
                     .dfg
-                    .ebb_params(ebb)
+                    .block_params(block)
                     .iter()
                     .map(|&v| self.func.dfg.value_type(v));
                 self.typecheck_variable_args_iterator(inst, iter, errors)?;
             }
-            BranchInfo::Table(table, ebb) => {
-                if let Some(ebb) = ebb {
-                    let arg_count = self.func.dfg.num_ebb_params(ebb);
+            BranchInfo::Table(table, block) => {
+                if let Some(block) = block {
+                    let arg_count = self.func.dfg.num_block_params(block);
                     if arg_count != 0 {
                         return errors.nonfatal((
                             inst,
                             self.context(inst),
                             format!(
                                 "takes no arguments, but had target {} with {} arguments",
-                                ebb, arg_count,
+                                block, arg_count,
                             ),
                         ));
                     }
                 }
-                for ebb in self.func.jump_tables[table].iter() {
-                    let arg_count = self.func.dfg.num_ebb_params(*ebb);
+                for block in self.func.jump_tables[table].iter() {
+                    let arg_count = self.func.dfg.num_block_params(*block);
                     if arg_count != 0 {
                         return errors.nonfatal((
                             inst,
                             self.context(inst),
                             format!(
                                 "takes no arguments, but had target {} with {} arguments",
-                                ebb, arg_count,
+                                block, arg_count,
                             ),
                         ));
                     }
@@ -1658,28 +1658,29 @@ impl<'a> Verifier<'a> {
         cfg: &ControlFlowGraph,
         errors: &mut VerifierErrors,
     ) -> VerifierStepResult<()> {
-        let mut expected_succs = BTreeSet::<Ebb>::new();
-        let mut got_succs = BTreeSet::<Ebb>::new();
+        let mut expected_succs = BTreeSet::<Block>::new();
+        let mut got_succs = BTreeSet::<Block>::new();
         let mut expected_preds = BTreeSet::<Inst>::new();
         let mut got_preds = BTreeSet::<Inst>::new();
 
-        for ebb in self.func.layout.ebbs() {
-            expected_succs.extend(self.expected_cfg.succ_iter(ebb));
-            got_succs.extend(cfg.succ_iter(ebb));
+        for block in self.func.layout.blocks() {
+            expected_succs.extend(self.expected_cfg.succ_iter(block));
+            got_succs.extend(cfg.succ_iter(block));
 
-            let missing_succs: Vec<Ebb> = expected_succs.difference(&got_succs).cloned().collect();
+            let missing_succs: Vec<Block> =
+                expected_succs.difference(&got_succs).cloned().collect();
             if !missing_succs.is_empty() {
                 errors.report((
-                    ebb,
+                    block,
                     format!("cfg lacked the following successor(s) {:?}", missing_succs),
                 ));
                 continue;
             }
 
-            let excess_succs: Vec<Ebb> = got_succs.difference(&expected_succs).cloned().collect();
+            let excess_succs: Vec<Block> = got_succs.difference(&expected_succs).cloned().collect();
             if !excess_succs.is_empty() {
                 errors.report((
-                    ebb,
+                    block,
                     format!("cfg had unexpected successor(s) {:?}", excess_succs),
                 ));
                 continue;
@@ -1687,15 +1688,18 @@ impl<'a> Verifier<'a> {
 
             expected_preds.extend(
                 self.expected_cfg
-                    .pred_iter(ebb)
-                    .map(|BasicBlock { inst, .. }| inst),
+                    .pred_iter(block)
+                    .map(|BlockPredecessor { inst, .. }| inst),
+            );
+            got_preds.extend(
+                cfg.pred_iter(block)
+                    .map(|BlockPredecessor { inst, .. }| inst),
             );
-            got_preds.extend(cfg.pred_iter(ebb).map(|BasicBlock { inst, ..
}| inst));
 
             let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
             if !missing_preds.is_empty() {
                 errors.report((
-                    ebb,
+                    block,
                     format!(
                         "cfg lacked the following predecessor(s) {:?}",
                         missing_preds
@@ -1707,7 +1711,7 @@ impl<'a> Verifier<'a> {
             let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
             if !excess_preds.is_empty() {
                 errors.report((
-                    ebb,
+                    block,
                     format!("cfg had unexpected predecessor(s) {:?}", excess_preds),
                 ));
                 continue;
@@ -1969,12 +1973,12 @@ impl<'a> Verifier<'a> {
         self.typecheck_entry_block_params(errors)?;
         self.typecheck_function_signature(errors)?;
 
-        for ebb in self.func.layout.ebbs() {
-            if self.func.layout.first_inst(ebb).is_none() {
-                return errors.fatal((ebb, format!("{} cannot be empty", ebb)));
+        for block in self.func.layout.blocks() {
+            if self.func.layout.first_inst(block).is_none() {
+                return errors.fatal((block, format!("{} cannot be empty", block)));
             }
-            for inst in self.func.layout.ebb_insts(ebb) {
-                self.ebb_integrity(ebb, inst, errors)?;
+            for inst in self.func.layout.block_insts(block) {
+                self.block_integrity(block, inst, errors)?;
                 self.instruction_integrity(inst, errors)?;
                 self.verify_safepoint_unused(inst, errors)?;
                 self.typecheck(inst, errors)?;
@@ -1982,7 +1986,7 @@ impl<'a> Verifier<'a> {
                 self.immediate_constraints(inst, errors)?;
             }
 
-            self.encodable_as_bb(ebb, errors)?;
+            self.encodable_as_bb(block, errors)?;
         }
 
         verify_flags(self.func, &self.expected_cfg, self.isa, errors)?;
@@ -2039,20 +2043,20 @@ mod tests {
     #[test]
     fn bad_instruction_format() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        func.layout.append_ebb(ebb0);
+        let block0 = func.dfg.make_block();
+        func.layout.append_block(block0);
         let nullary_with_bad_opcode = func.dfg.make_inst(InstructionData::UnaryImm {
             opcode: Opcode::F32const,
             imm: 0.into(),
         });
-        func.layout.append_inst(nullary_with_bad_opcode, ebb0);
+        func.layout.append_inst(nullary_with_bad_opcode, block0);
         func.layout.append_inst(
             func.dfg.make_inst(InstructionData::Jump {
                 opcode: Opcode::Jump,
-                destination: ebb0,
+                destination: block0,
                 args: EntityList::default(),
             }),
-            ebb0,
+            block0,
         );
         let flags = &settings::Flags::new(settings::builder());
         let verifier = Verifier::new(&func, flags.into());
@@ -2093,8 +2097,8 @@ mod tests {
     fn test_printing_contextual_errors() {
         // Build function.
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        func.layout.append_ebb(ebb0);
+        let block0 = func.dfg.make_block();
+        func.layout.append_block(block0);
 
         // Build instruction: v0, v1 = iconst 42
         let inst = func.dfg.make_inst(InstructionData::UnaryImm {
@@ -2103,7 +2107,7 @@ mod tests {
         });
         func.dfg.append_result(inst, types::I32);
         func.dfg.append_result(inst, types::I32);
-        func.layout.append_inst(inst, ebb0);
+        func.layout.append_inst(inst, block0);
 
         // Setup verifier.
        let mut errors = VerifierErrors::default();
@@ -2120,16 +2124,16 @@
     }
 
     #[test]
-    fn test_empty_ebb() {
+    fn test_empty_block() {
         let mut func = Function::new();
-        let ebb0 = func.dfg.make_ebb();
-        func.layout.append_ebb(ebb0);
+        let block0 = func.dfg.make_block();
+        func.layout.append_block(block0);
 
         let flags = &settings::Flags::new(settings::builder());
         let verifier = Verifier::new(&func, flags.into());
         let mut errors = VerifierErrors::default();
         let _ = verifier.run(&mut errors);
 
-        assert_err_with_msg!(errors, "ebb0 cannot be empty");
+        assert_err_with_msg!(errors, "block0 cannot be empty");
     }
 }
diff --git a/cranelift/codegen/src/write.rs b/cranelift/codegen/src/write.rs
index 507b2f9f6c..6d109f4c04 100644
--- a/cranelift/codegen/src/write.rs
+++ b/cranelift/codegen/src/write.rs
@@ -6,8 +6,8 @@
 use crate::entity::SecondaryMap;
 use crate::ir::entities::AnyEntity;
 use crate::ir::{
-    DataFlowGraph, DisplayFunctionAnnotations, Ebb, Function, Inst, SigRef, Type, Value, ValueDef,
-    ValueLoc,
+    Block, DataFlowGraph, DisplayFunctionAnnotations, Function, Inst, SigRef, Type, Value,
+    ValueDef, ValueLoc,
 };
 use crate::isa::{RegInfo, TargetIsa};
 use crate::packed_option::ReservedValue;
@@ -19,13 +19,13 @@ use core::fmt::{self, Write};
 
 /// A `FuncWriter` used to decorate functions during printing.
 pub trait FuncWriter {
-    /// Write the extended basic block header for the current function.
-    fn write_ebb_header(
+    /// Write the basic block header for the current function.
+    fn write_block_header(
         &mut self,
         w: &mut dyn Write,
         func: &Function,
         isa: Option<&dyn TargetIsa>,
-        ebb: Ebb,
+        block: Block,
         indent: usize,
     ) -> fmt::Result;
 
@@ -145,15 +145,15 @@ impl FuncWriter for PlainWriter {
         write_instruction(w, func, aliases, isa, inst, indent)
     }
 
-    fn write_ebb_header(
+    fn write_block_header(
        &mut self,
         w: &mut dyn Write,
         func: &Function,
         isa: Option<&dyn TargetIsa>,
-        ebb: Ebb,
+        block: Block,
         indent: usize,
     ) -> fmt::Result {
-        write_ebb_header(w, func, isa, ebb, indent)
+        write_block_header(w, func, isa, block, indent)
     }
 }
 
@@ -196,11 +196,11 @@ pub fn decorate_function(
     writeln!(w, " {{")?;
     let aliases = alias_map(func);
     let mut any = func_w.write_preamble(w, func, regs)?;
-    for ebb in &func.layout {
+    for block in &func.layout {
         if any {
             writeln!(w)?;
         }
-        decorate_ebb(func_w, w, func, &aliases, annotations, ebb)?;
+        decorate_block(func_w, w, func, &aliases, annotations, block)?;
         any = true;
     }
     writeln!(w, "}}")
@@ -235,24 +235,24 @@ fn write_arg(
 
 /// Write out the basic block header, outdented:
 ///
-///     ebb1:
-///     ebb1(v1: i32):
-///     ebb10(v4: f64, v5: b1):
+///     block1:
+///     block1(v1: i32):
+///     block10(v4: f64, v5: b1):
 ///
-pub fn write_ebb_header(
+pub fn write_block_header(
     w: &mut dyn Write,
     func: &Function,
     isa: Option<&dyn TargetIsa>,
-    ebb: Ebb,
+    block: Block,
     indent: usize,
 ) -> fmt::Result {
-    // The `indent` is the instruction indentation. EBB headers are 4 spaces out from that.
-    write!(w, "{1:0$}{2}", indent - 4, "", ebb)?;
+    // The `indent` is the instruction indentation. Block headers are 4 spaces out from that.
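An editor's note on the `write!` call that follows: `{1:0$}` prints argument 1 (an empty string) padded to the width given by argument 0, so the header lands 4 columns to the left of the instruction indentation. A tiny stand-alone sketch of the same trick, with a hypothetical `header_indent` helper; only the format string comes from the patch:

    // `{1:0$}` pads "" to a runtime width, producing the outdented header.
    fn header_indent(indent: usize, header: &str) -> String {
        format!("{1:0$}{2}", indent - 4, "", header)
    }

    fn main() {
        assert_eq!(header_indent(8, "block1:"), "    block1:");
        assert_eq!(header_indent(4, "block1:"), "block1:");
    }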
+    write!(w, "{1:0$}{2}", indent - 4, "", block)?;
 
     let regs = isa.map(TargetIsa::register_info);
     let regs = regs.as_ref();
 
-    let mut args = func.dfg.ebb_params(ebb).iter().cloned();
+    let mut args = func.dfg.block_params(block).iter().cloned();
     match args.next() {
         None => return writeln!(w, ":"),
         Some(arg) => {
@@ -309,13 +309,13 @@ fn write_value_range_markers(
     Ok(())
 }
 
-fn decorate_ebb<FW: FuncWriter>(
+fn decorate_block<FW: FuncWriter>(
     func_w: &mut FW,
     w: &mut dyn Write,
     func: &Function,
     aliases: &SecondaryMap<Value, Vec<Value>>,
     annotations: &DisplayFunctionAnnotations,
-    ebb: Ebb,
+    block: Block,
 ) -> fmt::Result {
     // Indent all instructions if any encodings are present.
     let indent = if func.encodings.is_empty() && func.srclocs.is_empty() {
@@ -325,8 +325,8 @@
     };
     let isa = annotations.isa;
 
-    func_w.write_ebb_header(w, func, isa, ebb, indent)?;
-    for a in func.dfg.ebb_params(ebb).iter().cloned() {
+    func_w.write_block_header(w, func, isa, block, indent)?;
+    for a in func.dfg.block_params(block).iter().cloned() {
         write_value_aliases(w, aliases, a, indent)?;
     }
 
@@ -334,7 +334,7 @@
         if !func.offsets.is_empty() {
             let encinfo = isa.encoding_info();
             let regs = &isa.register_info();
-            for (offset, inst, size) in func.inst_offsets(ebb, &encinfo) {
+            for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
                 func_w.write_instruction(w, func, aliases, Some(isa), inst, indent)?;
                 if size > 0 {
                     if let Some(val_ranges) = annotations.value_ranges {
@@ -346,7 +346,7 @@
         }
     }
 
-    for inst in func.layout.ebb_insts(ebb) {
+    for inst in func.layout.block_insts(block) {
         func_w.write_instruction(w, func, aliases, isa, inst, indent)?;
     }
 
@@ -374,11 +374,11 @@ fn type_suffix(func: &Function, inst: Inst) -> Option<Type> {
     // operand, we don't need the type suffix.
     if constraints.use_typevar_operand() {
         let ctrl_var = inst_data.typevar_operand(&func.dfg.value_lists).unwrap();
-        let def_ebb = match func.dfg.value_def(ctrl_var) {
-            ValueDef::Result(instr, _) => func.layout.inst_ebb(instr),
-            ValueDef::Param(ebb, _) => Some(ebb),
+        let def_block = match func.dfg.value_def(ctrl_var) {
+            ValueDef::Result(instr, _) => func.layout.inst_block(instr),
+            ValueDef::Param(block, _) => Some(block),
         };
-        if def_ebb.is_some() && def_ebb == func.layout.inst_ebb(inst) {
+        if def_block.is_some() && def_block == func.layout.inst_block(inst) {
             return None;
         }
     }
@@ -533,7 +533,7 @@ pub fn write_operands(
             ..
         } => {
             write!(w, " {}", destination)?;
-            write_ebb_args(w, args.as_slice(pool))
+            write_block_args(w, args.as_slice(pool))
         }
         Branch {
             destination,
@@ -542,7 +542,7 @@
         } => {
             let args = args.as_slice(pool);
             write!(w, " {}, {}", args[0], destination)?;
-            write_ebb_args(w, &args[1..])
+            write_block_args(w, &args[1..])
         }
         BranchInt {
             cond,
@@ -552,7 +552,7 @@
         } => {
             let args = args.as_slice(pool);
             write!(w, " {} {}, {}", cond, args[0], destination)?;
-            write_ebb_args(w, &args[1..])
+            write_block_args(w, &args[1..])
        }
         BranchFloat {
             cond,
@@ -562,7 +562,7 @@
         } => {
             let args = args.as_slice(pool);
             write!(w, " {} {}, {}", cond, args[0], destination)?;
-            write_ebb_args(w, &args[1..])
+            write_block_args(w, &args[1..])
         }
         BranchIcmp {
             cond,
@@ -572,7 +572,7 @@
         } => {
             let args = args.as_slice(pool);
             write!(w, " {} {}, {}, {}", cond, args[0], args[1], destination)?;
-            write_ebb_args(w, &args[2..])
+            write_block_args(w, &args[2..])
         }
         BranchTable {
             arg,
@@ -714,8 +714,8 @@
     }
 }
 
-/// Write EBB args using optional parantheses.
-fn write_ebb_args(w: &mut dyn Write, args: &[Value]) -> fmt::Result {
+/// Write block args using optional parentheses.
+fn write_block_args(w: &mut dyn Write, args: &[Value]) -> fmt::Result {
     if args.is_empty() {
         Ok(())
     } else {
@@ -775,33 +775,33 @@ mod tests {
             "function %foo() fast {\n    ss0 = explicit_slot 4\n}\n"
         );
 
-        let ebb = f.dfg.make_ebb();
-        f.layout.append_ebb(ebb);
+        let block = f.dfg.make_block();
+        f.layout.append_block(block);
         assert_eq!(
             f.to_string(),
-            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nebb0:\n}\n"
+            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nblock0:\n}\n"
         );
 
-        f.dfg.append_ebb_param(ebb, types::I8);
+        f.dfg.append_block_param(block, types::I8);
         assert_eq!(
             f.to_string(),
-            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nebb0(v0: i8):\n}\n"
+            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nblock0(v0: i8):\n}\n"
         );
 
-        f.dfg.append_ebb_param(ebb, types::F32.by(4).unwrap());
+        f.dfg.append_block_param(block, types::F32.by(4).unwrap());
         assert_eq!(
             f.to_string(),
-            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nebb0(v0: i8, v1: f32x4):\n}\n"
+            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nblock0(v0: i8, v1: f32x4):\n}\n"
        );
 
         {
             let mut cursor = FuncCursor::new(&mut f);
-            cursor.set_position(CursorPosition::After(ebb));
+            cursor.set_position(CursorPosition::After(block));
             cursor.ins().return_(&[])
         };
         assert_eq!(
             f.to_string(),
-            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nebb0(v0: i8, v1: f32x4):\n    return\n}\n"
+            "function %foo() fast {\n    ss0 = explicit_slot 4\n\nblock0(v0: i8, v1: f32x4):\n    return\n}\n"
         );
     }
 
@@ -811,18 +811,18 @@
         let mut func = Function::new();
         {
-            let ebb0 = func.dfg.make_ebb();
+            let block0 = func.dfg.make_block();
             let mut pos = FuncCursor::new(&mut func);
-            pos.insert_ebb(ebb0);
+            pos.insert_block(block0);
 
             // make some detached values for change_to_alias
-            let v0 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
-            let v1 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
-            let v2 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
-            pos.func.dfg.detach_ebb_params(ebb0);
+            let v0 = pos.func.dfg.append_block_param(block0, types::I32);
+            let v1 = pos.func.dfg.append_block_param(block0, types::I32);
+            let v2 = pos.func.dfg.append_block_param(block0, types::I32);
+            pos.func.dfg.detach_block_params(block0);
 
-            // alias to a param--will be printed at beginning of ebb defining param
-            let v3 = pos.func.dfg.append_ebb_param(ebb0, types::I32);
+            // alias to a param--will be printed at beginning of block defining param
+            let v3 = pos.func.dfg.append_block_param(block0, types::I32);
             pos.func.dfg.change_to_alias(v0, v3);
 
             // alias to an alias--should print attached to alias, not ultimate target
@@ -837,7 +837,7 @@
         }
         assert_eq!(
             func.to_string(),
-            "function u0:0() fast {\nebb0(v3: i32):\n    v0 -> v3\n    v2 -> v0\n    v4 = iconst.i32 42\n    v5 = iadd v0, v0\n    v1 -> v5\n    v6 = iconst.i32 23\n    v7 = iadd v1, v1\n}\n"
+            "function u0:0() fast {\nblock0(v3: i32):\n    v0 -> v3\n    v2 -> v0\n    v4 = iconst.i32 42\n    v5 = iadd v0, v0\n    v1 -> v5\n    v6 = iconst.i32 23\n    v7 = iadd v1, v1\n}\n"
         );
     }
 }
diff --git a/cranelift/docs/callex.clif b/cranelift/docs/callex.clif
index 1d93239199..5b3ba2fe3d 100644
--- a/cranelift/docs/callex.clif
+++ b/cranelift/docs/callex.clif
@@ -3,14 +3,14 @@ test verifier
 
 function %gcd(i32 uext, i32 uext) -> i32 uext system_v {
     fn0 = %divmod(i32 uext, i32 uext) -> i32 uext, i32 uext
 
-ebb1(v0: i32, v1: i32):
-    brz v1, ebb3
-    jump ebb2
+block1(v0: i32, v1: i32):
+    brz v1, block3
+    jump block2
 
-ebb2:
+block2:
v2, v3 = call fn0(v0, v1) return v2 -ebb3: +block3: return v0 } diff --git a/cranelift/docs/example.clif b/cranelift/docs/example.clif index b848f7026a..a465c85d0b 100644 --- a/cranelift/docs/example.clif +++ b/cranelift/docs/example.clif @@ -3,17 +3,17 @@ test verifier function %average(i32, i32) -> f32 system_v { ss0 = explicit_slot 8 ; Stack slot for ``sum``. -ebb1(v0: i32, v1: i32): +block1(v0: i32, v1: i32): v2 = f64const 0x0.0 stack_store v2, ss0 - brz v1, ebb5 ; Handle count == 0. - jump ebb2 + brz v1, block5 ; Handle count == 0. + jump block2 -ebb2: +block2: v3 = iconst.i32 0 - jump ebb3(v3) + jump block3(v3) -ebb3(v4: i32): +block3(v4: i32): v5 = imul_imm v4, 4 v6 = iadd v0, v5 v7 = load.f32 v6 ; array[i] @@ -23,17 +23,17 @@ ebb3(v4: i32): stack_store v10, ss0 v11 = iadd_imm v4, 1 v12 = icmp ult v11, v1 - brnz v12, ebb3(v11) ; Loop backedge. - jump ebb4 + brnz v12, block3(v11) ; Loop backedge. + jump block4 -ebb4: +block4: v13 = stack_load.f64 ss0 v14 = fcvt_from_uint.f64 v1 v15 = fdiv v13, v14 v16 = fdemote.f32 v15 return v16 -ebb5: +block5: v100 = f32const +NaN return v100 } diff --git a/cranelift/docs/heapex-dyn.clif b/cranelift/docs/heapex-dyn.clif index 93c40bd29c..161bb4887a 100644 --- a/cranelift/docs/heapex-dyn.clif +++ b/cranelift/docs/heapex-dyn.clif @@ -6,7 +6,7 @@ function %add_members(i32, i64 vmctx) -> f32 baldrdash_system_v { gv2 = load.i32 notrap aligned gv0+72 heap0 = dynamic gv1, min 0x1000, bound gv2, offset_guard 0 -ebb0(v0: i32, v6: i64): +block0(v0: i32, v6: i64): v1 = heap_addr.i64 heap0, v0, 20 v2 = load.f32 v1+16 v3 = heap_addr.i64 heap0, v0, 24 diff --git a/cranelift/docs/heapex-sm32.clif b/cranelift/docs/heapex-sm32.clif index acd38a6564..ce8fffb914 100644 --- a/cranelift/docs/heapex-sm32.clif +++ b/cranelift/docs/heapex-sm32.clif @@ -5,7 +5,7 @@ function %add_members(i32, i32 vmctx) -> f32 baldrdash_system_v { gv1 = load.i32 notrap aligned gv0+64 heap0 = static gv1, min 0x1000, bound 0x10_0000, offset_guard 0x1000 -ebb0(v0: i32, v5: i32): +block0(v0: i32, v5: i32): v1 = heap_addr.i32 heap0, v0, 1 v2 = load.f32 v1+16 v3 = load.f32 v1+20 diff --git a/cranelift/docs/heapex-sm64.clif b/cranelift/docs/heapex-sm64.clif index 20934ecba5..89c2df841b 100644 --- a/cranelift/docs/heapex-sm64.clif +++ b/cranelift/docs/heapex-sm64.clif @@ -5,7 +5,7 @@ function %add_members(i32, i64 vmctx) -> f32 baldrdash_system_v { gv1 = load.i64 notrap aligned gv0+64 heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v5: i64): +block0(v0: i32, v5: i64): v1 = heap_addr.i64 heap0, v0, 1 v2 = load.f32 v1+16 v3 = load.f32 v1+20 diff --git a/cranelift/entity/src/lib.rs b/cranelift/entity/src/lib.rs index 4f007ed34c..f9062a8c51 100644 --- a/cranelift/entity/src/lib.rs +++ b/cranelift/entity/src/lib.rs @@ -104,7 +104,7 @@ macro_rules! entity_impl { }; // Include basic `Display` impl using the given display prefix. - // Display an `Ebb` reference as "ebb12". + // Display a `Block` reference as "block12". 
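For readers who have not expanded `entity_impl!`: the display convention the comment above describes boils down to prefix-plus-index. A hand-rolled equivalent from the editor (a sketch, not the macro's actual expansion):

    use core::fmt;

    // A stand-in for what `entity_impl!(Block, "block")` derives: the
    // display prefix followed by the entity's index number.
    pub struct Block(u32);

    impl fmt::Display for Block {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "block{}", self.0)
        }
    }

    fn main() {
        assert_eq!(Block(12).to_string(), "block12");
    }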
($entity:ident, $display_prefix:expr) => { entity_impl!($entity); diff --git a/cranelift/entity/src/set.rs b/cranelift/entity/src/set.rs index c5ba346c63..ac8b156be2 100644 --- a/cranelift/entity/src/set.rs +++ b/cranelift/entity/src/set.rs @@ -216,7 +216,7 @@ mod tests { #[test] fn pop_unordered() { - let mut ebbs = [ + let mut blocks = [ E(0), E(1), E(6), @@ -231,14 +231,14 @@ mod tests { ]; let mut m = EntitySet::new(); - for &ebb in &ebbs { - m.insert(ebb); + for &block in &blocks { + m.insert(block); } assert_eq!(m.len, 13); - ebbs.sort(); + blocks.sort(); - for &ebb in ebbs.iter().rev() { - assert_eq!(ebb, m.pop().unwrap()); + for &block in blocks.iter().rev() { + assert_eq!(block, m.pop().unwrap()); } assert!(m.is_empty()); diff --git a/cranelift/faerie/src/backend.rs b/cranelift/faerie/src/backend.rs index aca9196bc5..f2c227a52a 100644 --- a/cranelift/faerie/src/backend.rs +++ b/cranelift/faerie/src/backend.rs @@ -387,7 +387,7 @@ struct FaerieRelocSink<'a> { } impl<'a> RelocSink for FaerieRelocSink<'a> { - fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { + fn reloc_block(&mut self, _offset: CodeOffset, _reloc: Reloc, _block_offset: CodeOffset) { unimplemented!(); } diff --git a/cranelift/filetests/filetests/cfg/loop.clif b/cranelift/filetests/filetests/cfg/loop.clif index a52ae09986..a18de9dc31 100644 --- a/cranelift/filetests/filetests/cfg/loop.clif +++ b/cranelift/filetests/filetests/cfg/loop.clif @@ -5,33 +5,33 @@ test verifier function %nonsense(i32, i32) -> f32 { ; regex: I=\binst\d+\b ; check: digraph "%nonsense" { -; check: ebb0 [shape=record, label="{ebb0(v1: i32, v2: i32): -; check: | <$(BRZ=$I)>brz v2, ebb2 -; nextln: | <$(JUMP0=$I)>jump ebb3 +; check: block0 [shape=record, label="{block0(v1: i32, v2: i32): +; check: | <$(BRZ=$I)>brz v2, block2 +; nextln: | <$(JUMP0=$I)>jump block3 ; nextln: }"] -; nextln: ebb3 [shape=record, label="{ebb3: -; check: | <$(JUMP3=$I)>jump ebb1(v4) +; nextln: block3 [shape=record, label="{block3: +; check: | <$(JUMP3=$I)>jump block1(v4) ; nextln: }"] -; nextln: ebb1 [shape=record, label="{ebb1(v5: i32): -; check: | <$(BRNZ1=$I)>brnz v13, ebb1(v12) -; nextln: | <$(JUMP1=$I)>jump ebb4 +; nextln: block1 [shape=record, label="{block1(v5: i32): +; check: | <$(BRNZ1=$I)>brnz v13, block1(v12) +; nextln: | <$(JUMP1=$I)>jump block4 ; nextln: }"] -; nextln: ebb4 [shape=record, label="{ebb4: +; nextln: block4 [shape=record, label="{block4: ; check: | <$I>return v17 ; nextln: }"] -; nextln: ebb2 [shape=record, label="{ebb2: +; nextln: block2 [shape=record, label="{block2: ; check: | <$I>return v100 ; check:}"] -ebb0(v1: i32, v2: i32): +block0(v1: i32, v2: i32): v3 = f64const 0x0.0 - brz v2, ebb2 ; unordered: ebb0:$BRZ -> ebb2 - jump ebb3 ; unordered: ebb0:$JUMP0 -> ebb3 + brz v2, block2 ; unordered: block0:$BRZ -> block2 + jump block3 ; unordered: block0:$JUMP0 -> block3 -ebb3: +block3: v4 = iconst.i32 0 - jump ebb1(v4) ; unordered: ebb3:$JUMP3 -> ebb1 + jump block1(v4) ; unordered: block3:$JUMP3 -> block1 -ebb1(v5: i32): +block1(v5: i32): v6 = imul_imm v5, 4 v7 = iadd v1, v6 v8 = f32const 0.0 @@ -40,17 +40,17 @@ ebb1(v5: i32): v11 = fadd v9, v10 v12 = iadd_imm v5, 1 v13 = icmp ult v12, v2 - brnz v13, ebb1(v12) ; unordered: ebb1:$BRNZ1 -> ebb1 - jump ebb4 ; unordered: ebb1:$JUMP1 -> ebb4 + brnz v13, block1(v12) ; unordered: block1:$BRNZ1 -> block1 + jump block4 ; unordered: block1:$JUMP1 -> block4 -ebb4: +block4: v14 = f64const 0.0 v15 = f64const 0.0 v16 = fdiv v14, v15 v17 = f32const 0.0 return v17 -ebb2: +block2: 
v100 = f32const 0.0 return v100 } diff --git a/cranelift/filetests/filetests/cfg/traps_early.clif b/cranelift/filetests/filetests/cfg/traps_early.clif index 36f3016d5c..33de056e4c 100644 --- a/cranelift/filetests/filetests/cfg/traps_early.clif +++ b/cranelift/filetests/filetests/cfg/traps_early.clif @@ -6,16 +6,16 @@ test verifier function %nonsense(i32) { ; check: digraph "%nonsense" { -ebb0(v1: i32): +block0(v1: i32): trap user0 ; error: terminator instruction was encountered before the end - brnz v1, ebb2 ; unordered: ebb0:inst1 -> ebb2 - jump ebb1 ; unordered: ebb0:inst2 -> ebb1 + brnz v1, block2 ; unordered: block0:inst1 -> block2 + jump block1 ; unordered: block0:inst2 -> block1 -ebb1: +block1: v2 = iconst.i32 0 v3 = iadd v1, v3 - jump ebb0(v3) ; unordered: ebb1:inst5 -> ebb0 + jump block0(v3) ; unordered: block1:inst5 -> block0 -ebb2: +block2: return v1 } diff --git a/cranelift/filetests/filetests/cfg/unused_node.clif b/cranelift/filetests/filetests/cfg/unused_node.clif index 1a2dd9fb1d..41f98073fd 100644 --- a/cranelift/filetests/filetests/cfg/unused_node.clif +++ b/cranelift/filetests/filetests/cfg/unused_node.clif @@ -3,25 +3,25 @@ test print-cfg function %not_reached(i32) -> i32 { ; check: digraph "%not_reached" { -; check: ebb0 [shape=record, label="{ebb0(v0: i32): -; check: | brnz v0, ebb2 +; check: block0 [shape=record, label="{block0(v0: i32): +; check: | brnz v0, block2 ; check: | trap user0 ; check: }"] -; check: ebb1 [shape=record, label="{ebb1: -; check: | jump ebb0(v2) +; check: block1 [shape=record, label="{block1: +; check: | jump block0(v2) ; check: }"] -; check: ebb2 [shape=record, label="{ebb2: +; check: block2 [shape=record, label="{block2: ; check: | return v0 ; check: }"] -ebb0(v0: i32): - brnz v0, ebb2 ; unordered: ebb0:inst0 -> ebb2 +block0(v0: i32): + brnz v0, block2 ; unordered: block0:inst0 -> block2 trap user0 -ebb1: +block1: v1 = iconst.i32 1 v2 = iadd v0, v1 - jump ebb0(v2) ; unordered: ebb1:inst4 -> ebb0 + jump block0(v2) ; unordered: block1:inst4 -> block0 -ebb2: +block2: return v0 } diff --git a/cranelift/filetests/filetests/dce/basic.clif b/cranelift/filetests/filetests/dce/basic.clif index 436b4aee19..0c94926584 100644 --- a/cranelift/filetests/filetests/dce/basic.clif +++ b/cranelift/filetests/filetests/dce/basic.clif @@ -1,46 +1,46 @@ test dce function %simple() -> i32 { -ebb0: +block0: v2 = iconst.i32 2 v3 = iconst.i32 3 return v3 } ; sameln: function %simple -; nextln: ebb0: +; nextln: block0: ; nextln: v3 = iconst.i32 3 ; nextln: return v3 ; nextln: } function %some_branching(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v3 = iconst.i32 70 v4 = iconst.i32 71 v5 = iconst.i32 72 v8 = iconst.i32 73 - brz v0, ebb1 - jump ebb2(v8) + brz v0, block1 + jump block2(v8) -ebb1: +block1: v2 = iadd v0, v3 return v0 -ebb2(v9: i32): +block2(v9: i32): v6 = iadd v1, v4 v7 = iadd v6, v9 return v7 } ; sameln: function %some_branching -; nextln: ebb0(v0: i32, v1: i32): +; nextln: block0(v0: i32, v1: i32): ; nextln: v4 = iconst.i32 71 ; nextln: v8 = iconst.i32 73 -; nextln: brz v0, ebb1 -; nextln: jump ebb2(v8) +; nextln: brz v0, block1 +; nextln: jump block2(v8) ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: return v0 ; nextln: -; nextln: ebb2(v9: i32): +; nextln: block2(v9: i32): ; nextln: v6 = iadd.i32 v1, v4 ; nextln: v7 = iadd v6, v9 ; nextln: return v7 diff --git a/cranelift/filetests/filetests/domtree/basic.clif b/cranelift/filetests/filetests/domtree/basic.clif index 37cb20d41d..2960ab0e62 100644 --- 
a/cranelift/filetests/filetests/domtree/basic.clif +++ b/cranelift/filetests/filetests/domtree/basic.clif @@ -1,25 +1,25 @@ test domtree function %test(i32) { - ebb0(v0: i32): - jump ebb1 ; dominates: ebb1 - ebb1: - brz v0, ebb3 ; dominates: ebb3 - jump ebb2 ; dominates: ebb2 - ebb2: - jump ebb3 - ebb3: + block0(v0: i32): + jump block1 ; dominates: block1 + block1: + brz v0, block3 ; dominates: block3 + jump block2 ; dominates: block2 + block2: + jump block3 + block3: return } ; check: cfg_postorder: -; sameln: ebb2 -; sameln: ebb3 -; sameln: ebb1 -; sameln: ebb0 +; sameln: block2 +; sameln: block3 +; sameln: block1 +; sameln: block0 ; check: domtree_preorder { -; nextln: ebb0: ebb1 -; nextln: ebb1: ebb3 ebb2 -; nextln: ebb3: -; nextln: ebb2: +; nextln: block0: block1 +; nextln: block1: block3 block2 +; nextln: block3: +; nextln: block2: ; nextln: } diff --git a/cranelift/filetests/filetests/domtree/loops.clif b/cranelift/filetests/filetests/domtree/loops.clif index 3f3fafb01d..a2a334e3fa 100644 --- a/cranelift/filetests/filetests/domtree/loops.clif +++ b/cranelift/filetests/filetests/domtree/loops.clif @@ -1,118 +1,118 @@ test domtree function %test(i32) { - ebb0(v0: i32): - brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5 - jump ebb2 ; dominates: ebb2 - ebb1: - jump ebb3 - ebb2: - brz v0, ebb4 - jump ebb5 - ebb3: - jump ebb4 - ebb4: - brz v0, ebb3 - jump ebb5 - ebb5: - brz v0, ebb4 - jump ebb6 ; dominates: ebb6 - ebb6: + block0(v0: i32): + brz v0, block1 ; dominates: block1 block3 block4 block5 + jump block2 ; dominates: block2 + block1: + jump block3 + block2: + brz v0, block4 + jump block5 + block3: + jump block4 + block4: + brz v0, block3 + jump block5 + block5: + brz v0, block4 + jump block6 ; dominates: block6 + block6: return } ; Fall-through-first, prune-at-source DFT: ; -; ebb0 { -; ebb0:brz v0, ebb1 { -; ebb0:jump ebb2 { -; ebb2 { -; ebb2:brz v2, ebb2 - -; ebb2:brz v3, ebb1 - -; ebb2:brz v4, ebb4 { -; ebb2: jump ebb5 { -; ebb5: jump ebb6 { -; ebb6 {} +; block0 { +; block0:brz v0, block1 { +; block0:jump block2 { +; block2 { +; block2:brz v2, block2 - +; block2:brz v3, block1 - +; block2:brz v4, block4 { +; block2: jump block5 { +; block5: jump block6 { +; block6 {} ; } ; } -; ebb4 {} +; block4 {} ; } -; } ebb2 +; } block2 ; } -; ebb1 { -; ebb1:jump ebb3 { -; ebb3 {} +; block1 { +; block1:jump block3 { +; block3 {} ; } -; } ebb1 +; } block1 ; } -; } ebb0 +; } block0 ; ; check: cfg_postorder: -; sameln: ebb6 -; sameln: ebb5 -; sameln: ebb3 -; sameln: ebb4 -; sameln: ebb2 -; sameln: ebb1 -; sameln: ebb0 +; sameln: block6 +; sameln: block5 +; sameln: block3 +; sameln: block4 +; sameln: block2 +; sameln: block1 +; sameln: block0 ; check: domtree_preorder { -; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5 -; nextln: ebb1: -; nextln: ebb2: -; nextln: ebb4: -; nextln: ebb3: -; nextln: ebb5: ebb6 -; nextln: ebb6: +; nextln: block0: block1 block2 block4 block3 block5 +; nextln: block1: +; nextln: block2: +; nextln: block4: +; nextln: block3: +; nextln: block5: block6 +; nextln: block6: ; nextln: } function %loop2(i32) system_v { - ebb0(v0: i32): - brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5 - jump ebb2 ; dominates: ebb2 - ebb1: - jump ebb3 - ebb2: - brz v0, ebb4 - jump ebb5 - ebb3: - jump ebb4 - ebb4: - brz v0, ebb3 - jump ebb8 ; dominates: ebb8 - ebb8: - brnz v0, ebb5 - jump ebb6 ; dominates: ebb6 - ebb5: - brz v0, ebb4 - jump ebb9 ; dominates: ebb9 - ebb9: + block0(v0: i32): + brz v0, block1 ; dominates: block1 block3 block4 block5 + jump block2 ; dominates: block2 + block1: + jump block3 + 
block2: + brz v0, block4 + jump block5 + block3: + jump block4 + block4: + brz v0, block3 + jump block8 ; dominates: block8 + block8: + brnz v0, block5 + jump block6 ; dominates: block6 + block5: + brz v0, block4 + jump block9 ; dominates: block9 + block9: trap user0 - ebb6: - jump ebb7 ; dominates: ebb7 - ebb7: + block6: + jump block7 ; dominates: block7 + block7: return } ; check: cfg_postorder: -; sameln: ebb9 -; sameln: ebb5 -; sameln: ebb7 -; sameln: ebb6 -; sameln: ebb8 -; sameln: ebb3 -; sameln: ebb4 -; sameln: ebb2 -; sameln: ebb1 -; sameln: ebb0 +; sameln: block9 +; sameln: block5 +; sameln: block7 +; sameln: block6 +; sameln: block8 +; sameln: block3 +; sameln: block4 +; sameln: block2 +; sameln: block1 +; sameln: block0 ; check: domtree_preorder { -; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5 -; nextln: ebb1: -; nextln: ebb2: -; nextln: ebb4: ebb8 -; nextln: ebb8: ebb6 -; nextln: ebb6: ebb7 -; nextln: ebb7: -; nextln: ebb3: -; nextln: ebb5: ebb9 -; nextln: ebb9: +; nextln: block0: block1 block2 block4 block3 block5 +; nextln: block1: +; nextln: block2: +; nextln: block4: block8 +; nextln: block8: block6 +; nextln: block6: block7 +; nextln: block7: +; nextln: block3: +; nextln: block5: block9 +; nextln: block9: ; nextln: } diff --git a/cranelift/filetests/filetests/domtree/loops2.clif b/cranelift/filetests/filetests/domtree/loops2.clif index 84712c112f..140916bafb 100644 --- a/cranelift/filetests/filetests/domtree/loops2.clif +++ b/cranelift/filetests/filetests/domtree/loops2.clif @@ -1,92 +1,92 @@ test domtree function %loop1(i32) { - ebb0(v0: i32): - brz v0, ebb1 ; dominates: ebb1 ebb6 - jump ebb10 ; dominates: ebb10 - ebb10: - brnz v0, ebb2 ; dominates: ebb2 ebb9 - jump ebb3 ; dominates: ebb3 - ebb1: - jump ebb6 - ebb2: - brz v0, ebb4 ; dominates: ebb4 ebb7 ebb8 - jump ebb5 ; dominates: ebb5 - ebb3: - jump ebb9 - ebb4: - brz v0, ebb4 - jump ebb11 ; dominates: ebb11 - ebb11: - brnz v0, ebb6 - jump ebb7 - ebb5: - brz v0, ebb7 - jump ebb12 ; dominates: ebb12 - ebb12: - brnz v0, ebb8 - jump ebb9 - ebb6: + block0(v0: i32): + brz v0, block1 ; dominates: block1 block6 + jump block10 ; dominates: block10 + block10: + brnz v0, block2 ; dominates: block2 block9 + jump block3 ; dominates: block3 + block1: + jump block6 + block2: + brz v0, block4 ; dominates: block4 block7 block8 + jump block5 ; dominates: block5 + block3: + jump block9 + block4: + brz v0, block4 + jump block11 ; dominates: block11 + block11: + brnz v0, block6 + jump block7 + block5: + brz v0, block7 + jump block12 ; dominates: block12 + block12: + brnz v0, block8 + jump block9 + block6: return - ebb7: - jump ebb8 - ebb8: + block7: + jump block8 + block8: return - ebb9: + block9: return } ; check: domtree_preorder { -; nextln: ebb0: ebb1 ebb10 ebb6 -; nextln: ebb1: -; nextln: ebb10: ebb2 ebb3 ebb9 -; nextln: ebb2: ebb4 ebb5 ebb7 ebb8 -; nextln: ebb4: ebb11 -; nextln: ebb11: -; nextln: ebb5: ebb12 -; nextln: ebb12: -; nextln: ebb7: -; nextln: ebb8: -; nextln: ebb3: -; nextln: ebb9: -; nextln: ebb6: +; nextln: block0: block1 block10 block6 +; nextln: block1: +; nextln: block10: block2 block3 block9 +; nextln: block2: block4 block5 block7 block8 +; nextln: block4: block11 +; nextln: block11: +; nextln: block5: block12 +; nextln: block12: +; nextln: block7: +; nextln: block8: +; nextln: block3: +; nextln: block9: +; nextln: block6: ; nextln: } function %loop2(i32) system_v { - ebb0(v0: i32): - brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5 - jump ebb2 ; dominates: ebb2 - ebb1: - jump ebb3 - ebb2: - brz v0, ebb4 - jump ebb5 - ebb3: 
- jump ebb4 - ebb4: - brz v0, ebb3 - jump ebb5 - ebb5: - brz v0, ebb4 - jump ebb6 ; dominates: ebb6 - ebb6: + block0(v0: i32): + brz v0, block1 ; dominates: block1 block3 block4 block5 + jump block2 ; dominates: block2 + block1: + jump block3 + block2: + brz v0, block4 + jump block5 + block3: + jump block4 + block4: + brz v0, block3 + jump block5 + block5: + brz v0, block4 + jump block6 ; dominates: block6 + block6: return } ; check: cfg_postorder: -; sameln: ebb6 -; sameln: ebb5 -; sameln: ebb3 -; sameln: ebb4 -; sameln: ebb2 -; sameln: ebb1 -; sameln: ebb0 +; sameln: block6 +; sameln: block5 +; sameln: block3 +; sameln: block4 +; sameln: block2 +; sameln: block1 +; sameln: block0 ; check: domtree_preorder { -; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5 -; nextln: ebb1: -; nextln: ebb2: -; nextln: ebb4: -; nextln: ebb3: -; nextln: ebb5: ebb6 -; nextln: ebb6: +; nextln: block0: block1 block2 block4 block3 block5 +; nextln: block1: +; nextln: block2: +; nextln: block4: +; nextln: block3: +; nextln: block5: block6 +; nextln: block6: ; nextln: } diff --git a/cranelift/filetests/filetests/domtree/tall-tree.clif b/cranelift/filetests/filetests/domtree/tall-tree.clif index 6f93c023e8..436edc643b 100644 --- a/cranelift/filetests/filetests/domtree/tall-tree.clif +++ b/cranelift/filetests/filetests/domtree/tall-tree.clif @@ -1,54 +1,54 @@ test domtree function %test(i32) { - ebb0(v0: i32): - brz v0, ebb1 ; dominates: ebb1 - jump ebb12 ; dominates: ebb12 - ebb12: - brnz v0, ebb2 ; dominates: ebb2 ebb5 - jump ebb3 ; dominates: ebb3 - ebb1: - jump ebb4 ; dominates: ebb4 - ebb2: - jump ebb5 - ebb3: - jump ebb5 - ebb4: - brz v0, ebb6 ; dominates: ebb6 ebb10 - jump ebb7 ; dominates: ebb7 - ebb5: + block0(v0: i32): + brz v0, block1 ; dominates: block1 + jump block12 ; dominates: block12 + block12: + brnz v0, block2 ; dominates: block2 block5 + jump block3 ; dominates: block3 + block1: + jump block4 ; dominates: block4 + block2: + jump block5 + block3: + jump block5 + block4: + brz v0, block6 ; dominates: block6 block10 + jump block7 ; dominates: block7 + block5: return - ebb6: - brz v0, ebb8 ; dominates: ebb11 ebb8 - jump ebb13 ; dominates: ebb13 - ebb13: - brnz v0, ebb9 ; dominates: ebb9 - jump ebb10 - ebb7: - jump ebb10 - ebb8: - jump ebb11 - ebb9: - jump ebb11 - ebb10: + block6: + brz v0, block8 ; dominates: block11 block8 + jump block13 ; dominates: block13 + block13: + brnz v0, block9 ; dominates: block9 + jump block10 + block7: + jump block10 + block8: + jump block11 + block9: + jump block11 + block10: return - ebb11: + block11: return } ; check: domtree_preorder { -; nextln: ebb0: ebb1 ebb12 -; nextln: ebb1: ebb4 -; nextln: ebb4: ebb6 ebb7 ebb10 -; nextln: ebb6: ebb8 ebb13 ebb11 -; nextln: ebb8: -; nextln: ebb13: ebb9 -; nextln: ebb9: -; nextln: ebb11: -; nextln: ebb7: -; nextln: ebb10: -; nextln: ebb12: ebb2 ebb3 ebb5 -; nextln: ebb2: -; nextln: ebb3: -; nextln: ebb5: +; nextln: block0: block1 block12 +; nextln: block1: block4 +; nextln: block4: block6 block7 block10 +; nextln: block6: block8 block13 block11 +; nextln: block8: +; nextln: block13: block9 +; nextln: block9: +; nextln: block11: +; nextln: block7: +; nextln: block10: +; nextln: block12: block2 block3 block5 +; nextln: block2: +; nextln: block3: +; nextln: block5: ; nextln: } diff --git a/cranelift/filetests/filetests/domtree/wide-tree.clif b/cranelift/filetests/filetests/domtree/wide-tree.clif index fdfdc169a1..e118e684f0 100644 --- a/cranelift/filetests/filetests/domtree/wide-tree.clif +++ 
b/cranelift/filetests/filetests/domtree/wide-tree.clif @@ -1,73 +1,73 @@ test domtree function %test(i32) { - ebb0(v0: i32): - brz v0, ebb13 ; dominates: ebb13 - jump ebb1 ; dominates: ebb1 - ebb1: - brz v0, ebb2 ; dominates: ebb2 ebb7 - jump ebb20 ; dominates: ebb20 - ebb20: - brnz v0, ebb3 ; dominates: ebb3 - jump ebb21 ; dominates: ebb21 - ebb21: - brz v0, ebb4 ; dominates: ebb4 - jump ebb22 ; dominates: ebb22 - ebb22: - brnz v0, ebb5 ; dominates: ebb5 - jump ebb6 ; dominates: ebb6 - ebb2: - jump ebb7 - ebb3: - jump ebb7 - ebb4: - jump ebb7 - ebb5: - jump ebb7 - ebb6: - jump ebb7 - ebb7: - brnz v0, ebb8 ; dominates: ebb8 ebb12 - jump ebb23 ; dominates: ebb23 - ebb23: - brz v0, ebb9 ; dominates: ebb9 - jump ebb24 ; dominates: ebb24 - ebb24: - brnz v0, ebb10 ; dominates: ebb10 - jump ebb11 ; dominates: ebb11 - ebb8: - jump ebb12 - ebb9: - jump ebb12 - ebb10: - brz v0, ebb13 - jump ebb12 - ebb11: - jump ebb13 - ebb12: + block0(v0: i32): + brz v0, block13 ; dominates: block13 + jump block1 ; dominates: block1 + block1: + brz v0, block2 ; dominates: block2 block7 + jump block20 ; dominates: block20 + block20: + brnz v0, block3 ; dominates: block3 + jump block21 ; dominates: block21 + block21: + brz v0, block4 ; dominates: block4 + jump block22 ; dominates: block22 + block22: + brnz v0, block5 ; dominates: block5 + jump block6 ; dominates: block6 + block2: + jump block7 + block3: + jump block7 + block4: + jump block7 + block5: + jump block7 + block6: + jump block7 + block7: + brnz v0, block8 ; dominates: block8 block12 + jump block23 ; dominates: block23 + block23: + brz v0, block9 ; dominates: block9 + jump block24 ; dominates: block24 + block24: + brnz v0, block10 ; dominates: block10 + jump block11 ; dominates: block11 + block8: + jump block12 + block9: + jump block12 + block10: + brz v0, block13 + jump block12 + block11: + jump block13 + block12: return - ebb13: + block13: return } ; check: domtree_preorder { -; nextln: ebb0: ebb13 ebb1 -; nextln: ebb13: -; nextln: ebb1: ebb2 ebb20 ebb7 -; nextln: ebb2: -; nextln: ebb20: ebb3 ebb21 -; nextln: ebb3: -; nextln: ebb21: ebb4 ebb22 -; nextln: ebb4: -; nextln: ebb22: ebb5 ebb6 -; nextln: ebb5: -; nextln: ebb6: -; nextln: ebb7: ebb8 ebb23 ebb12 -; nextln: ebb8: -; nextln: ebb23: ebb9 ebb24 -; nextln: ebb9: -; nextln: ebb24: ebb10 ebb11 -; nextln: ebb10: -; nextln: ebb11: -; nextln: ebb12: +; nextln: block0: block13 block1 +; nextln: block13: +; nextln: block1: block2 block20 block7 +; nextln: block2: +; nextln: block20: block3 block21 +; nextln: block3: +; nextln: block21: block4 block22 +; nextln: block4: +; nextln: block22: block5 block6 +; nextln: block5: +; nextln: block6: +; nextln: block7: block8 block23 block12 +; nextln: block8: +; nextln: block23: block9 block24 +; nextln: block9: +; nextln: block24: block10 block11 +; nextln: block10: +; nextln: block11: +; nextln: block12: ; nextln: } diff --git a/cranelift/filetests/filetests/isa/riscv/abi-e.clif b/cranelift/filetests/filetests/isa/riscv/abi-e.clif index 80b275506c..fcd762ee81 100644 --- a/cranelift/filetests/filetests/isa/riscv/abi-e.clif +++ b/cranelift/filetests/filetests/isa/riscv/abi-e.clif @@ -9,6 +9,6 @@ function %f() { ; available in RV32E. 
sig0 = (i64, i64, i64, i64) -> i64 system_v ; check: sig0 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11] system_v -ebb0: +block0: return } diff --git a/cranelift/filetests/filetests/isa/riscv/abi.clif b/cranelift/filetests/filetests/isa/riscv/abi.clif index b3c9513aef..d9469f490e 100644 --- a/cranelift/filetests/filetests/isa/riscv/abi.clif +++ b/cranelift/filetests/filetests/isa/riscv/abi.clif @@ -27,6 +27,6 @@ function %f() { sig5 = (i64x4) system_v ; check: sig5 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17]) system_v -ebb0: +block0: return } diff --git a/cranelift/filetests/filetests/isa/riscv/binary32.clif b/cranelift/filetests/filetests/isa/riscv/binary32.clif index b76bfcce69..5a69c4289b 100644 --- a/cranelift/filetests/filetests/isa/riscv/binary32.clif +++ b/cranelift/filetests/filetests/isa/riscv/binary32.clif @@ -6,7 +6,7 @@ function %RV32I(i32 link [%x1]) -> i32 link [%x1] { sig0 = () fn0 = %foo() -ebb0(v9999: i32): +block0(v9999: i32): [-,%x10] v1 = iconst.i32 1 [-,%x21] v2 = iconst.i32 2 @@ -94,96 +94,96 @@ ebb0(v9999: i32): call_indirect sig0, v1() ; bin: 000500e7 call_indirect sig0, v2() ; bin: 000a80e7 - brz v1, ebb3 - fallthrough ebb4 + brz v1, block3 + fallthrough block4 -ebb4: - brnz v1, ebb1 - fallthrough ebb5 +block4: + brnz v1, block1 + fallthrough block5 -ebb5: +block5: ; jalr %x0, %x1, 0 return v9999 ; bin: 00008067 -ebb1: +block1: ; beq 0x000 - br_icmp eq v1, v2, ebb1 ; bin: 01550063 - fallthrough ebb100 + br_icmp eq v1, v2, block1 ; bin: 01550063 + fallthrough block100 -ebb100: +block100: ; bne 0xffc - br_icmp ne v1, v2, ebb1 ; bin: ff551ee3 - fallthrough ebb101 + br_icmp ne v1, v2, block1 ; bin: ff551ee3 + fallthrough block101 -ebb101: +block101: ; blt 0xff8 - br_icmp slt v1, v2, ebb1 ; bin: ff554ce3 - fallthrough ebb102 + br_icmp slt v1, v2, block1 ; bin: ff554ce3 + fallthrough block102 -ebb102: +block102: ; bge 0xff4 - br_icmp sge v1, v2, ebb1 ; bin: ff555ae3 - fallthrough ebb103 + br_icmp sge v1, v2, block1 ; bin: ff555ae3 + fallthrough block103 -ebb103: +block103: ; bltu 0xff0 - br_icmp ult v1, v2, ebb1 ; bin: ff5568e3 - fallthrough ebb104 + br_icmp ult v1, v2, block1 ; bin: ff5568e3 + fallthrough block104 -ebb104: +block104: ; bgeu 0xfec - br_icmp uge v1, v2, ebb1 ; bin: ff5576e3 - fallthrough ebb105 + br_icmp uge v1, v2, block1 ; bin: ff5576e3 + fallthrough block105 -ebb105: +block105: ; Forward branches. 
- fallthrough ebb106 + fallthrough block106 -ebb106: +block106: ; beq 0x018 - br_icmp eq v2, v1, ebb2 ; bin: 00aa8c63 - fallthrough ebb107 + br_icmp eq v2, v1, block2 ; bin: 00aa8c63 + fallthrough block107 -ebb107: +block107: ; bne 0x014 - br_icmp ne v2, v1, ebb2 ; bin: 00aa9a63 - fallthrough ebb108 + br_icmp ne v2, v1, block2 ; bin: 00aa9a63 + fallthrough block108 -ebb108: +block108: ; blt 0x010 - br_icmp slt v2, v1, ebb2 ; bin: 00aac863 - fallthrough ebb109 + br_icmp slt v2, v1, block2 ; bin: 00aac863 + fallthrough block109 -ebb109: +block109: ; bge 0x00c - br_icmp sge v2, v1, ebb2 ; bin: 00aad663 - fallthrough ebb110 + br_icmp sge v2, v1, block2 ; bin: 00aad663 + fallthrough block110 -ebb110: +block110: ; bltu 0x008 - br_icmp ult v2, v1, ebb2 ; bin: 00aae463 - fallthrough ebb111 + br_icmp ult v2, v1, block2 ; bin: 00aae463 + fallthrough block111 -ebb111: +block111: ; bgeu 0x004 - br_icmp uge v2, v1, ebb2 ; bin: 00aaf263 + br_icmp uge v2, v1, block2 ; bin: 00aaf263 - fallthrough ebb2 + fallthrough block2 -ebb2: +block2: ; jal %x0, 0x00000 - jump ebb2 ; bin: 0000006f + jump block2 ; bin: 0000006f -ebb3: +block3: ; beq x, %x0 - brz v1, ebb3 ; bin: 00050063 - fallthrough ebb6 + brz v1, block3 ; bin: 00050063 + fallthrough block6 -ebb6: +block6: ; bne x, %x0 - brnz v1, ebb3 ; bin: fe051ee3 + brnz v1, block3 ; bin: fe051ee3 ; jal %x0, 0x1ffff4 - jump ebb2 ; bin: ff5ff06f + jump block2 ; bin: ff5ff06f } diff --git a/cranelift/filetests/filetests/isa/riscv/encoding.clif b/cranelift/filetests/filetests/isa/riscv/encoding.clif index 98b5f66db6..b8c991f52e 100644 --- a/cranelift/filetests/filetests/isa/riscv/encoding.clif +++ b/cranelift/filetests/filetests/isa/riscv/encoding.clif @@ -2,7 +2,7 @@ test legalizer target riscv32 supports_m=1 function %int32(i32, i32) { -ebb0(v1: i32, v2: i32): +block0(v1: i32, v2: i32): v10 = iadd v1, v2 ; check: [R#0c] ; sameln: v10 = iadd diff --git a/cranelift/filetests/filetests/isa/riscv/expand-i32.clif b/cranelift/filetests/filetests/isa/riscv/expand-i32.clif index eb63d7cdcd..ee62bc093f 100644 --- a/cranelift/filetests/filetests/isa/riscv/expand-i32.clif +++ b/cranelift/filetests/filetests/isa/riscv/expand-i32.clif @@ -8,7 +8,7 @@ target riscv64 supports_m=1 ; regex: V=v\d+ function %carry_out(i32, i32) -> i32, b1 { -ebb0(v1: i32, v2: i32): +block0(v1: i32, v2: i32): v3, v4 = iadd_cout v1, v2 return v3, v4 } @@ -19,7 +19,7 @@ ebb0(v1: i32, v2: i32): ; Expanding illegal immediate constants. ; Note that at some point we'll probably expand the iconst as well. 
function %large_imm(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = iadd_imm v0, 1000000000 return v1 } @@ -28,7 +28,7 @@ ebb0(v0: i32): ; check: return v1 function %bitclear(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = band_not v0, v1 ; check: iconst.i32 -1 ; check: bxor diff --git a/cranelift/filetests/filetests/isa/riscv/legalize-abi.clif b/cranelift/filetests/filetests/isa/riscv/legalize-abi.clif index 88c5989db6..0a5fb801a3 100644 --- a/cranelift/filetests/filetests/isa/riscv/legalize-abi.clif +++ b/cranelift/filetests/filetests/isa/riscv/legalize-abi.clif @@ -7,8 +7,8 @@ target riscv32 ; regex: WS=\s+ function %int_split_args(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): +block0(v0: i64): + ; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): ; check: v0 = iconcat $v0l, $v0h v1 = iadd_imm v0, 1 ; check: $(v1l=$V), $(v1h=$V) = isplit v1 @@ -19,7 +19,7 @@ ebb0(v0: i64): function %split_call_arg(i32) { fn1 = %foo(i64) fn2 = %foo(i32, i64) -ebb0(v0: i32): +block0(v0: i32): v1 = uextend.i64 v0 call fn1(v1) ; check: $(v1l=$V), $(v1h=$V) = isplit v1 @@ -31,36 +31,36 @@ ebb0(v0: i32): function %split_ret_val() { fn1 = %foo() -> i64 -ebb0: +block0: v1 = call fn1() - ; check: ebb0($(link=$V): i32): + ; check: block0($(link=$V): i32): ; nextln: $(v1l=$V), $(v1h=$V) = call fn1() ; check: v1 = iconcat $v1l, $v1h - jump ebb1(v1) - ; check: jump ebb1(v1) + jump block1(v1) + ; check: jump block1(v1) -ebb1(v10: i64): - jump ebb1(v10) +block1(v10: i64): + jump block1(v10) } ; First return value is fine, second one is expanded. function %split_ret_val2() { fn1 = %foo() -> i32, i64 -ebb0: +block0: v1, v2 = call fn1() - ; check: ebb0($(link=$V): i32): + ; check: block0($(link=$V): i32): ; nextln: v1, $(v2l=$V), $(v2h=$V) = call fn1() ; check: v2 = iconcat $v2l, $v2h - jump ebb1(v1, v2) - ; check: jump ebb1(v1, v2) + jump block1(v1, v2) + ; check: jump block1(v1, v2) -ebb1(v9: i32, v10: i64): - jump ebb1(v9, v10) +block1(v9: i32, v10: i64): + jump block1(v9, v10) } function %int_ext(i8, i8 sext, i8 uext) -> i8 uext { -ebb0(v1: i8, v2: i8, v3: i8): - ; check: ebb0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32): +block0(v1: i8, v2: i8, v3: i8): + ; check: block0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32): ; check: v2 = ireduce.i8 $v2x ; check: v3 = ireduce.i8 $v3x ; check: $(v1x=$V) = uextend.i32 v1 @@ -71,21 +71,21 @@ ebb0(v1: i8, v2: i8, v3: i8): ; Function produces single return value, still need to copy. 
function %ext_ret_val() { fn1 = %foo() -> i8 sext -ebb0: +block0: v1 = call fn1() - ; check: ebb0($V: i32): + ; check: block0($V: i32): ; nextln: $(rv=$V) = call fn1() ; check: v1 = ireduce.i8 $rv - jump ebb1(v1) - ; check: jump ebb1(v1) + jump block1(v1) + ; check: jump block1(v1) -ebb1(v10: i8): - jump ebb1(v10) +block1(v10: i8): + jump block1(v10) } function %vector_split_args(i64x4) -> i64x4 { -ebb0(v0: i64x4): - ; check: ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32): +block0(v0: i64x4): + ; check: block0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32): ; check: $(v0a=$V) = iconcat $v0al, $v0ah ; check: $(v0b=$V) = iconcat $v0bl, $v0bh ; check: $(v0ab=$V) = vconcat $v0a, $v0b @@ -107,7 +107,7 @@ ebb0(v0: i64x4): function %indirect(i32) { sig1 = () system_v -ebb0(v0: i32): +block0(v0: i32): call_indirect sig1, v0() return } @@ -115,7 +115,7 @@ ebb0(v0: i32): ; The first argument to call_indirect doesn't get altered. function %indirect_arg(i32, f32x2) { sig1 = (f32x2) system_v -ebb0(v0: i32, v1: f32x2): +block0(v0: i32, v1: f32x2): call_indirect sig1, v0(v1) ; check: call_indirect sig1, v0($V, $V) return @@ -125,7 +125,7 @@ ebb0(v0: i32, v1: f32x2): function %stack_args(i32) { ; check: $(ss0=$SS) = outgoing_arg 4 fn1 = %foo(i64, i64, i64, i64, i32) -ebb0(v0: i32): +block0(v0: i32): v1 = iconst.i64 1 call fn1(v1, v1, v1, v1, v0) ; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill v0 diff --git a/cranelift/filetests/filetests/isa/riscv/legalize-i64.clif b/cranelift/filetests/filetests/isa/riscv/legalize-i64.clif index d043337a21..11b31218be 100644 --- a/cranelift/filetests/filetests/isa/riscv/legalize-i64.clif +++ b/cranelift/filetests/filetests/isa/riscv/legalize-i64.clif @@ -5,11 +5,11 @@ target riscv32 supports_m=1 ; regex: V=v\d+ function %bitwise_and(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v3 = band v1, v2 return v3 } -; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): +; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): ; check: [R#ec ; sameln: $(v3l=$V) = band $v1l, $v2l ; check: [R#ec @@ -18,11 +18,11 @@ ebb0(v1: i64, v2: i64): ; check: return $v3l, $v3h, $link function %bitwise_or(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v3 = bor v1, v2 return v3 } -; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): +; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): ; check: [R#cc ; sameln: $(v3l=$V) = bor $v1l, $v2l ; check: [R#cc @@ -31,11 +31,11 @@ ebb0(v1: i64, v2: i64): ; check: return $v3l, $v3h, $link function %bitwise_xor(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v3 = bxor v1, v2 return v3 } -; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): +; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): ; check: [R#8c ; sameln: $(v3l=$V) = bxor $v1l, $v2l ; check: [R#8c @@ -47,11 +47,11 @@ function %arith_add(i64, i64) -> i64 { ; Legalizing iadd.i64 requires two steps: ; 1. Narrow to iadd_cout.i32, then ; 2. Expand iadd_cout.i32 since RISC-V has no carry flag. 
-ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v3 = iadd v1, v2 return v3 } -; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): +; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32): ; check: [R#0c ; sameln: $(v3l=$V) = iadd $v1l, $v2l ; check: $(c=$V) = icmp ult $v3l, $v1l diff --git a/cranelift/filetests/filetests/isa/riscv/legalize-icmp_imm-i64.clif b/cranelift/filetests/filetests/isa/riscv/legalize-icmp_imm-i64.clif index 3dd674a5d3..d7250cb3af 100644 --- a/cranelift/filetests/filetests/isa/riscv/legalize-icmp_imm-i64.clif +++ b/cranelift/filetests/filetests/isa/riscv/legalize-icmp_imm-i64.clif @@ -4,11 +4,11 @@ target riscv32 ; regex: V=v\d+ function %icmp_imm_eq(i64) -> b1 { -ebb0(v0: i64): +block0(v0: i64): v1 = icmp_imm eq v0, 0x20202020_10101010 return v1 } -; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): +; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): ; nextln: $(v2l=$V) -> $(v0l) ; nextln: $(v2h=$V) -> $(v0h) ; nextln: v0 = iconcat $(v0l), $(v0h) @@ -20,11 +20,11 @@ ebb0(v0: i64): ; nextln: return v1, $(link) function %icmp_imm_ne(i64) -> b1 { -ebb0(v0: i64): +block0(v0: i64): v1 = icmp_imm ne v0, 0x33333333_44444444 return v1 } -; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): +; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): ; nextln: $(v2l=$V) -> $(v0l) ; nextln: $(v2h=$V) -> $(v0h) ; nextln: v0 = iconcat $(v0l), $(v0h) @@ -36,11 +36,11 @@ ebb0(v0: i64): ; nextln: return v1, $(link) function %icmp_imm_sge(i64) -> b1 { -ebb0(v0: i64): +block0(v0: i64): v1 = icmp_imm sge v0, 0x01020304_05060708 return v1 } -; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): +; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32): ; nextln: $(v2l=$V) -> $(v0l) ; nextln: $(v2h=$V) -> $(v0h) ; nextln: v0 = iconcat $(v0l), $(v0h) diff --git a/cranelift/filetests/filetests/isa/riscv/parse-encoding.clif b/cranelift/filetests/filetests/isa/riscv/parse-encoding.clif index f79e552e6d..21cd828b8a 100644 --- a/cranelift/filetests/filetests/isa/riscv/parse-encoding.clif +++ b/cranelift/filetests/filetests/isa/riscv/parse-encoding.clif @@ -31,6 +31,6 @@ function %parse_encoding(i32 [%x5]) -> i32 [%x10] { ; check: sig6 = (i32 [%x10]) -> b1 [%x10] system_v ; nextln: fn0 = %bar sig6 -ebb0(v0: i32): +block0(v0: i32): return v0 } diff --git a/cranelift/filetests/filetests/isa/riscv/regmove.clif b/cranelift/filetests/filetests/isa/riscv/regmove.clif index 6ec17ef813..f1509e8178 100644 --- a/cranelift/filetests/filetests/isa/riscv/regmove.clif +++ b/cranelift/filetests/filetests/isa/riscv/regmove.clif @@ -3,7 +3,7 @@ test binemit target riscv32 function %regmoves(i32 link [%x1]) -> i32 link [%x1] { -ebb0(v9999: i32): +block0(v9999: i32): [-,%x10] v1 = iconst.i32 1 [-,%x7] v2 = iadd_imm v1, 1000 ; bin: 3e850393 regmove v1, %x10 -> %x11 ; bin: 00050593 diff --git a/cranelift/filetests/filetests/isa/riscv/split-args.clif b/cranelift/filetests/filetests/isa/riscv/split-args.clif index dd605de81a..9f4b3e9268 100644 --- a/cranelift/filetests/filetests/isa/riscv/split-args.clif +++ b/cranelift/filetests/filetests/isa/riscv/split-args.clif @@ -1,17 +1,17 @@ -; Test the legalization of EBB arguments that are split. +; Test the legalization of block arguments that are split. 
test legalizer
target riscv32

; regex: V=v\d+

function %simple(i64, i64) -> i64 {
-ebb0(v1: i64, v2: i64):
-; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
-    jump ebb1(v1)
-    ; check: jump ebb1($v1l, $v1h)
+block0(v1: i64, v2: i64):
+; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
+    jump block1(v1)
+    ; check: jump block1($v1l, $v1h)

-ebb1(v3: i64):
-; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
+block1(v3: i64):
+; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
    v4 = band v3, v2
    ; check: $(v4l=$V) = band $v3l, $v2l
    ; check: $(v4h=$V) = band $v3h, $v2h
@@ -20,18 +20,18 @@ ebb1(v3: i64):
 }

 function %multi(i64) -> i64 {
-ebb1(v1: i64):
-; check: ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
-    jump ebb2(v1, v1)
-    ; check: jump ebb2($v1l, $v1l, $v1h, $v1h)
+block1(v1: i64):
+; check: block1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
+    jump block2(v1, v1)
+    ; check: jump block2($v1l, $v1l, $v1h, $v1h)

-ebb2(v2: i64, v3: i64):
-; check: ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
-    jump ebb3(v2)
-    ; check: jump ebb3($v2l, $v2h)
+block2(v2: i64, v3: i64):
+; check: block2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
+    jump block3(v2)
+    ; check: jump block3($v2l, $v2h)

-ebb3(v4: i64):
-; check: ebb3($(v4l=$V): i32, $(v4h=$V): i32):
+block3(v4: i64):
+; check: block3($(v4l=$V): i32, $(v4h=$V): i32):
    v5 = band v4, v3
    ; check: $(v5l=$V) = band $v4l, $v3l
    ; check: $(v5h=$V) = band $v4h, $v3h
@@ -40,16 +40,16 @@ ebb3(v4: i64):
 }

 function %loop(i64, i64) -> i64 {
-ebb0(v1: i64, v2: i64):
-; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
-    jump ebb1(v1)
-    ; check: jump ebb1($v1l, $v1h)
+block0(v1: i64, v2: i64):
+; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
+    jump block1(v1)
+    ; check: jump block1($v1l, $v1h)

-ebb1(v3: i64):
-; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
+block1(v3: i64):
+; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
    v4 = band v3, v2
    ; check: $(v4l=$V) = band $v3l, $v2l
    ; check: $(v4h=$V) = band $v3h, $v2h
-    jump ebb1(v4)
-    ; check: jump ebb1($v4l, $v4h)
+    jump block1(v4)
+    ; check: jump block1($v4l, $v4h)
 }
diff --git a/cranelift/filetests/filetests/isa/riscv/verify-encoding.clif b/cranelift/filetests/filetests/isa/riscv/verify-encoding.clif
index 0ee7eb7a83..1d29b86da9 100644
--- a/cranelift/filetests/filetests/isa/riscv/verify-encoding.clif
+++ b/cranelift/filetests/filetests/isa/riscv/verify-encoding.clif
@@ -4,7 +4,7 @@ target riscv32

function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
    fn0 = %foo()
-ebb0(v9999: i32):
+block0(v9999: i32):
    ; iconst.i32 needs legalizing, so it should throw a re-encoding error
    [R#0,-] v1 = iconst.i32 0xf0f0f0f0f0 ; error: Instruction failed to re-encode
    [Iret#19] return v9999
@@ -13,7 +13,7 @@

function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
    fn0 = %foo()
-ebb0(v9999: i32):
+block0(v9999: i32):
    v1 = iconst.i32 1
    v2 = iconst.i32 2
    [R#0,-] v3 = iadd v1, v2 ; error: encoding R#00 should be R#0c
diff --git a/cranelift/filetests/filetests/isa/x86/abcd.clif b/cranelift/filetests/filetests/isa/x86/abcd.clif
index 67acac970b..658ba66ca4 100644
--- a/cranelift/filetests/filetests/isa/x86/abcd.clif
+++ b/cranelift/filetests/filetests/isa/x86/abcd.clif
@@ -5,7 +5,7 @@ target i686
; allocator can move it to a register that can be.
function %test(i32 [%rdi]) -> i32 system_v { -ebb0(v0: i32 [%rdi]): +block0(v0: i32 [%rdi]): v1 = ireduce.i8 v0 v2 = sextend.i32 v1 return v2 diff --git a/cranelift/filetests/filetests/isa/x86/abi-bool.clif b/cranelift/filetests/filetests/isa/x86/abi-bool.clif index fdf21ba055..2083250a91 100644 --- a/cranelift/filetests/filetests/isa/x86/abi-bool.clif +++ b/cranelift/filetests/filetests/isa/x86/abi-bool.clif @@ -2,18 +2,18 @@ test compile target x86_64 haswell function %foo(i64, i64, i64, i32) -> b1 system_v { -ebb3(v0: i64, v1: i64, v2: i64, v3: i32): +block3(v0: i64, v1: i64, v2: i64, v3: i32): v5 = icmp ne v2, v2 v8 = iconst.i64 0 - jump ebb2(v8, v3, v5) + jump block2(v8, v3, v5) -ebb2(v10: i64, v30: i32, v37: b1): +block2(v10: i64, v30: i32, v37: b1): v18 = load.i32 notrap aligned v2 v27 = iadd.i64 v10, v10 v31 = icmp eq v30, v30 - brz v31, ebb2(v27, v30, v37) - jump ebb0(v37) + brz v31, block2(v27, v30, v37) + jump block0(v37) -ebb0(v35: b1): +block0(v35: b1): return v35 } diff --git a/cranelift/filetests/filetests/isa/x86/abi32.clif b/cranelift/filetests/filetests/isa/x86/abi32.clif index 4b9f5fbcd1..155d0efc4c 100644 --- a/cranelift/filetests/filetests/isa/x86/abi32.clif +++ b/cranelift/filetests/filetests/isa/x86/abi32.clif @@ -14,7 +14,7 @@ function %f() { sig2 = (f32, i64) -> f64 system_v ; check: sig2 = (f32 [0], i32 [4], i32 [8]) -> f64 [%xmm0] system_v -ebb0: +block0: return } diff --git a/cranelift/filetests/filetests/isa/x86/abi64.clif b/cranelift/filetests/filetests/isa/x86/abi64.clif index ccea6304e6..9494e78c67 100644 --- a/cranelift/filetests/filetests/isa/x86/abi64.clif +++ b/cranelift/filetests/filetests/isa/x86/abi64.clif @@ -14,7 +14,7 @@ function %f() { sig2 = (f32, i64) -> f64 system_v ; check: sig2 = (f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0] system_v -ebb0: +block0: return } @@ -22,10 +22,10 @@ function %pass_stack_int64(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 sig0 = (i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 vmctx) baldrdash_system_v fn0 = u0:0 sig0 -ebb0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64, v6: i64, v7: i64, v8: i64, v9: i64, v10: i64, v11: i64, v12: i64, v13: i64, v14: i64, v15: i64, v16: i64, v17: i64, v18: i64, v19: i64, v20: i64): +block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64, v6: i64, v7: i64, v8: i64, v9: i64, v10: i64, v11: i64, v12: i64, v13: i64, v14: i64, v15: i64, v16: i64, v17: i64, v18: i64, v19: i64, v20: i64): call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) - jump ebb1 + jump block1 -ebb1: +block1: return } diff --git a/cranelift/filetests/filetests/isa/x86/allones_funcaddrs32.clif b/cranelift/filetests/filetests/isa/x86/allones_funcaddrs32.clif index 124803af6c..744b936c83 100644 --- a/cranelift/filetests/filetests/isa/x86/allones_funcaddrs32.clif +++ b/cranelift/filetests/filetests/isa/x86/allones_funcaddrs32.clif @@ -14,7 +14,7 @@ function %I32() { sig0 = () fn0 = %foo() -ebb0: +block0: ; asm: movl $-1, %ecx [-,%rcx] v400 = func_addr.i32 fn0 ; bin: b9 Abs4(%foo) ffffffff diff --git a/cranelift/filetests/filetests/isa/x86/allones_funcaddrs64.clif b/cranelift/filetests/filetests/isa/x86/allones_funcaddrs64.clif index f4dd9f2a6e..623e96c9d3 100644 --- a/cranelift/filetests/filetests/isa/x86/allones_funcaddrs64.clif +++ b/cranelift/filetests/filetests/isa/x86/allones_funcaddrs64.clif @@ -14,7 +14,7 @@ function %I64() { sig0 = () fn0 = %foo() -ebb0: +block0: ; asm: movabsq $-1, %rcx 
[-,%rcx] v400 = func_addr.i64 fn0 ; bin: 48 b9 Abs8(%foo) ffffffffffffffff diff --git a/cranelift/filetests/filetests/isa/x86/baldrdash-table-sig-reg.clif b/cranelift/filetests/filetests/isa/x86/baldrdash-table-sig-reg.clif index 6d1f72203d..e8dc4393ca 100644 --- a/cranelift/filetests/filetests/isa/x86/baldrdash-table-sig-reg.clif +++ b/cranelift/filetests/filetests/isa/x86/baldrdash-table-sig-reg.clif @@ -5,7 +5,7 @@ target i686 function u0:0(i32 vmctx) baldrdash_system_v { sig0 = (i32 vmctx, i32 sigid) baldrdash_system_v -ebb0(v0: i32): +block0(v0: i32): v2 = iconst.i32 0 v8 = iconst.i32 0 v9 = iconst.i32 0 diff --git a/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount.clif b/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount.clif index 595e5e99bb..7aca619d09 100644 --- a/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount.clif +++ b/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount.clif @@ -5,7 +5,7 @@ target x86_64 baseline ; clz/ctz on 64 bit operands function %i64_clz(i64) -> i64 { -ebb0(v10: i64): +block0(v10: i64): v11 = clz v10 ; check: x86_bsr ; check: selectif.i64 @@ -13,7 +13,7 @@ ebb0(v10: i64): } function %i64_ctz(i64) -> i64 { -ebb1(v20: i64): +block1(v20: i64): v21 = ctz v20 ; check: x86_bsf ; check: selectif.i64 @@ -24,7 +24,7 @@ ebb1(v20: i64): ; clz/ctz on 32 bit operands function %i32_clz(i32) -> i32 { -ebb0(v10: i32): +block0(v10: i32): v11 = clz v10 ; check: x86_bsr ; check: selectif.i32 @@ -32,7 +32,7 @@ ebb0(v10: i32): } function %i32_ctz(i32) -> i32 { -ebb1(v20: i32): +block1(v20: i32): v21 = ctz v20 ; check: x86_bsf ; check: selectif.i32 @@ -43,7 +43,7 @@ ebb1(v20: i32): ; popcount on 64 bit operands function %i64_popcount(i64) -> i64 { -ebb0(v30: i64): +block0(v30: i64): v31 = popcnt v30; ; check: ushr_imm ; check: iconst.i64 @@ -69,7 +69,7 @@ ebb0(v30: i64): ; popcount on 32 bit operands function %i32_popcount(i32) -> i32 { -ebb0(v40: i32): +block0(v40: i32): v41 = popcnt v40; ; check: ushr_imm ; check: iconst.i32 diff --git a/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount_encoding.clif b/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount_encoding.clif index 31c4016dc0..cbe18d904c 100644 --- a/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount_encoding.clif +++ b/cranelift/filetests/filetests/isa/x86/baseline_clz_ctz_popcount_encoding.clif @@ -8,7 +8,7 @@ target x86_64 baseline ; function %Foo() { -ebb0: +block0: ; 64-bit wide bsf [-,%r11] v10 = iconst.i64 0x1234 diff --git a/cranelift/filetests/filetests/isa/x86/binary32-float.clif b/cranelift/filetests/filetests/isa/x86/binary32-float.clif index 8a4ae5fe7e..dc65a1f234 100644 --- a/cranelift/filetests/filetests/isa/x86/binary32-float.clif +++ b/cranelift/filetests/filetests/isa/x86/binary32-float.clif @@ -13,7 +13,7 @@ function %F32() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: [-,%rcx] v0 = iconst.i32 1 [-,%rsi] v1 = iconst.i32 2 @@ -262,7 +262,7 @@ function %F64() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: [-,%rcx] v0 = iconst.i32 1 [-,%rsi] v1 = iconst.i32 2 @@ -472,53 +472,53 @@ ebb0: } function %cpuflags_float(f32 [%xmm0]) { -ebb0(v0: f32 [%xmm0]): +block0(v0: f32 [%xmm0]): ; asm: ucomiss %xmm0, %xmm0 [-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0 - jump ebb1 + jump block1 -ebb1: - ; asm: jnp ebb1 - brff ord v1, ebb1 ; bin: 7b fe - jump ebb2 +block1: + ; asm: jnp block1 + brff ord v1, block1 ; bin: 7b fe + jump block2 
-ebb2: - ; asm: jp ebb1 - brff uno v1, ebb1 ; bin: 7a fc - jump ebb3 +block2: + ; asm: jp block1 + brff uno v1, block1 ; bin: 7a fc + jump block3 -ebb3: - ; asm: jne ebb1 - brff one v1, ebb1 ; bin: 75 fa - jump ebb4 +block3: + ; asm: jne block1 + brff one v1, block1 ; bin: 75 fa + jump block4 -ebb4: - ; asm: je ebb1 - brff ueq v1, ebb1 ; bin: 74 f8 - jump ebb5 +block4: + ; asm: je block1 + brff ueq v1, block1 ; bin: 74 f8 + jump block5 -ebb5: - ; asm: ja ebb1 - brff gt v1, ebb1 ; bin: 77 f6 - jump ebb6 +block5: + ; asm: ja block1 + brff gt v1, block1 ; bin: 77 f6 + jump block6 -ebb6: - ; asm: jae ebb1 - brff ge v1, ebb1 ; bin: 73 f4 - jump ebb7 +block6: + ; asm: jae block1 + brff ge v1, block1 ; bin: 73 f4 + jump block7 -ebb7: - ; asm: jb ebb1 - brff ult v1, ebb1 ; bin: 72 f2 - jump ebb8 +block7: + ; asm: jb block1 + brff ult v1, block1 ; bin: 72 f2 + jump block8 -ebb8: - ; asm: jbe ebb1 - brff ule v1, ebb1 ; bin: 76 f0 - jump ebb9 +block8: + ; asm: jbe block1 + brff ule v1, block1 ; bin: 76 f0 + jump block9 -ebb9: +block9: ; asm: jp .+4; ud2 trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b ; asm: jnp .+4; ud2 diff --git a/cranelift/filetests/filetests/isa/x86/binary32.clif b/cranelift/filetests/filetests/isa/x86/binary32.clif index 5db78ee2e0..abe99ce0ea 100644 --- a/cranelift/filetests/filetests/isa/x86/binary32.clif +++ b/cranelift/filetests/filetests/isa/x86/binary32.clif @@ -19,7 +19,7 @@ function %I32() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: ; asm: movl $1, %ecx [-,%rcx] v1 = iconst.i32 1 ; bin: b9 00000001 ; asm: movl $2, %esi @@ -486,141 +486,141 @@ ebb0: [-,%rcx,%rflags] v709, v710 = isub_ifborrow v1, v2, v707 ; bin: 19 f1 ; asm: testl %ecx, %ecx - ; asm: je ebb1 - brz v1, ebb1 ; bin: 85 c9 74 0e - fallthrough ebb3 + ; asm: je block1 + brz v1, block1 ; bin: 85 c9 74 0e + fallthrough block3 -ebb3: +block3: ; asm: testl %esi, %esi - ; asm: je ebb1 - brz v2, ebb1 ; bin: 85 f6 74 0a - fallthrough ebb4 + ; asm: je block1 + brz v2, block1 ; bin: 85 f6 74 0a + fallthrough block4 -ebb4: +block4: ; asm: testl %ecx, %ecx - ; asm: jne ebb1 - brnz v1, ebb1 ; bin: 85 c9 75 06 - fallthrough ebb5 + ; asm: jne block1 + brnz v1, block1 ; bin: 85 c9 75 06 + fallthrough block5 -ebb5: +block5: ; asm: testl %esi, %esi - ; asm: jne ebb1 - brnz v2, ebb1 ; bin: 85 f6 75 02 + ; asm: jne block1 + brnz v2, block1 ; bin: 85 f6 75 02 - ; asm: jmp ebb2 - jump ebb2 ; bin: eb 01 + ; asm: jmp block2 + jump block2 ; bin: eb 01 - ; asm: ebb1: -ebb1: + ; asm: block1: +block1: ; asm: ret return ; bin: c3 - ; asm: ebb2: -ebb2: + ; asm: block2: +block2: trap user0 ; bin: user0 0f 0b } ; Special branch encodings only for I32 mode. 
function %special_branches() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rsi] v2 = iconst.i32 2 [-,%rdi] v3 = icmp eq v1, v2 [-,%rbx] v4 = icmp ugt v1, v2 ; asm: testl $0xff, %edi - ; asm: je ebb1 - brz v3, ebb1 ; bin: f7 c7 000000ff 0f 84 00000015 - fallthrough ebb2 + ; asm: je block1 + brz v3, block1 ; bin: f7 c7 000000ff 0f 84 00000015 + fallthrough block2 -ebb2: +block2: ; asm: testb %bl, %bl - ; asm: je ebb1 - brz v4, ebb1 ; bin: 84 db 74 11 - fallthrough ebb3 + ; asm: je block1 + brz v4, block1 ; bin: 84 db 74 11 + fallthrough block3 -ebb3: +block3: ; asm: testl $0xff, %edi - ; asm: jne ebb1 - brnz v3, ebb1 ; bin: f7 c7 000000ff 0f 85 00000005 - fallthrough ebb4 + ; asm: jne block1 + brnz v3, block1 ; bin: f7 c7 000000ff 0f 85 00000005 + fallthrough block4 -ebb4: +block4: ; asm: testb %bl, %bl - ; asm: jne ebb1 - brnz v4, ebb1 ; bin: 84 db 75 01 - fallthrough ebb5 + ; asm: jne block1 + brnz v4, block1 ; bin: 84 db 75 01 + fallthrough block5 -ebb5: +block5: return -ebb1: +block1: return } ; CPU flag instructions. function %cpu_flags() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rsi] v2 = iconst.i32 2 - jump ebb1 + jump block1 -ebb1: +block1: ; asm: cmpl %esi, %ecx [-,%rflags] v10 = ifcmp v1, v2 ; bin: 39 f1 ; asm: cmpl %ecx, %esi [-,%rflags] v11 = ifcmp v2, v1 ; bin: 39 ce - ; asm: je ebb1 - brif eq v11, ebb1 ; bin: 74 fa - jump ebb2 + ; asm: je block1 + brif eq v11, block1 ; bin: 74 fa + jump block2 -ebb2: - ; asm: jne ebb1 - brif ne v11, ebb1 ; bin: 75 f8 - jump ebb3 +block2: + ; asm: jne block1 + brif ne v11, block1 ; bin: 75 f8 + jump block3 -ebb3: - ; asm: jl ebb1 - brif slt v11, ebb1 ; bin: 7c f6 - jump ebb4 +block3: + ; asm: jl block1 + brif slt v11, block1 ; bin: 7c f6 + jump block4 -ebb4: - ; asm: jge ebb1 - brif sge v11, ebb1 ; bin: 7d f4 - jump ebb5 +block4: + ; asm: jge block1 + brif sge v11, block1 ; bin: 7d f4 + jump block5 -ebb5: - ; asm: jg ebb1 - brif sgt v11, ebb1 ; bin: 7f f2 - jump ebb6 +block5: + ; asm: jg block1 + brif sgt v11, block1 ; bin: 7f f2 + jump block6 -ebb6: - ; asm: jle ebb1 - brif sle v11, ebb1 ; bin: 7e f0 - jump ebb7 +block6: + ; asm: jle block1 + brif sle v11, block1 ; bin: 7e f0 + jump block7 -ebb7: - ; asm: jb ebb1 - brif ult v11, ebb1 ; bin: 72 ee - jump ebb8 +block7: + ; asm: jb block1 + brif ult v11, block1 ; bin: 72 ee + jump block8 -ebb8: - ; asm: jae ebb1 - brif uge v11, ebb1 ; bin: 73 ec - jump ebb9 +block8: + ; asm: jae block1 + brif uge v11, block1 ; bin: 73 ec + jump block9 -ebb9: - ; asm: ja ebb1 - brif ugt v11, ebb1 ; bin: 77 ea - jump ebb10 +block9: + ; asm: ja block1 + brif ugt v11, block1 ; bin: 77 ea + jump block10 -ebb10: - ; asm: jbe ebb1 - brif ule v11, ebb1 ; bin: 76 e8 - jump ebb11 +block10: + ; asm: jbe block1 + brif ule v11, block1 ; bin: 76 e8 + jump block11 -ebb11: +block11: ; asm: sete %bl [-,%rbx] v20 = trueif eq v11 ; bin: 0f 94 c3 @@ -690,7 +690,7 @@ ebb11: ; Tests for i32/i8 conversion instructions. function %I32_I8() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rcx] v11 = ireduce.i8 v1 ; bin: @@ -706,7 +706,7 @@ ebb0: ; Tests for i32/i16 conversion instructions. 
function %I32_I16() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rcx] v11 = ireduce.i16 v1 ; bin: diff --git a/cranelift/filetests/filetests/isa/x86/binary64-float.clif b/cranelift/filetests/filetests/isa/x86/binary64-float.clif index 171a3db7c9..2ec733b2c0 100644 --- a/cranelift/filetests/filetests/isa/x86/binary64-float.clif +++ b/cranelift/filetests/filetests/isa/x86/binary64-float.clif @@ -14,7 +14,7 @@ function %F32() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: [-,%r11] v0 = iconst.i32 1 [-,%rsi] v1 = iconst.i32 2 [-,%rax] v2 = iconst.i64 11 @@ -297,7 +297,7 @@ function %F64() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: [-,%r11] v0 = iconst.i32 1 [-,%rsi] v1 = iconst.i32 2 [-,%rax] v2 = iconst.i64 11 @@ -553,53 +553,53 @@ ebb0: } function %cpuflags_float(f32 [%xmm0]) { -ebb0(v0: f32 [%xmm0]): +block0(v0: f32 [%xmm0]): ; asm: ucomiss %xmm0, %xmm0 [-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0 - jump ebb1 + jump block1 -ebb1: - ; asm: jnp ebb1 - brff ord v1, ebb1 ; bin: 7b fe - jump ebb2 +block1: + ; asm: jnp block1 + brff ord v1, block1 ; bin: 7b fe + jump block2 -ebb2: - ; asm: jp ebb1 - brff uno v1, ebb1 ; bin: 7a fc - jump ebb3 +block2: + ; asm: jp block1 + brff uno v1, block1 ; bin: 7a fc + jump block3 -ebb3: - ; asm: jne ebb1 - brff one v1, ebb1 ; bin: 75 fa - jump ebb4 +block3: + ; asm: jne block1 + brff one v1, block1 ; bin: 75 fa + jump block4 -ebb4: - ; asm: je ebb1 - brff ueq v1, ebb1 ; bin: 74 f8 - jump ebb5 +block4: + ; asm: je block1 + brff ueq v1, block1 ; bin: 74 f8 + jump block5 -ebb5: - ; asm: ja ebb1 - brff gt v1, ebb1 ; bin: 77 f6 - jump ebb6 +block5: + ; asm: ja block1 + brff gt v1, block1 ; bin: 77 f6 + jump block6 -ebb6: - ; asm: jae ebb1 - brff ge v1, ebb1 ; bin: 73 f4 - jump ebb7 +block6: + ; asm: jae block1 + brff ge v1, block1 ; bin: 73 f4 + jump block7 -ebb7: - ; asm: jb ebb1 - brff ult v1, ebb1 ; bin: 72 f2 - jump ebb8 +block7: + ; asm: jb block1 + brff ult v1, block1 ; bin: 72 f2 + jump block8 -ebb8: - ; asm: jbe ebb1 - brff ule v1, ebb1 ; bin: 76 f0 - jump ebb9 +block8: + ; asm: jbe block1 + brff ule v1, block1 ; bin: 76 f0 + jump block9 -ebb9: +block9: ; asm: jp .+4; ud2 trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b ; asm: jnp .+4; ud2 diff --git a/cranelift/filetests/filetests/isa/x86/binary64-pic.clif b/cranelift/filetests/filetests/isa/x86/binary64-pic.clif index 3f3d86288c..5a2443adce 100644 --- a/cranelift/filetests/filetests/isa/x86/binary64-pic.clif +++ b/cranelift/filetests/filetests/isa/x86/binary64-pic.clif @@ -25,7 +25,7 @@ function %I64() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: ; Colocated functions. 
diff --git a/cranelift/filetests/filetests/isa/x86/binary64-run.clif b/cranelift/filetests/filetests/isa/x86/binary64-run.clif index 41db7e1b5a..6e6e1071c5 100644 --- a/cranelift/filetests/filetests/isa/x86/binary64-run.clif +++ b/cranelift/filetests/filetests/isa/x86/binary64-run.clif @@ -3,7 +3,7 @@ target x86_64 ; this verifies that returning b64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911 function %test_b64() -> b64 { -ebb0: +block0: [-, %r10] v0 = bconst.b64 true return v0 } diff --git a/cranelift/filetests/filetests/isa/x86/binary64.clif b/cranelift/filetests/filetests/isa/x86/binary64.clif index a3fcede60d..41290d1462 100644 --- a/cranelift/filetests/filetests/isa/x86/binary64.clif +++ b/cranelift/filetests/filetests/isa/x86/binary64.clif @@ -23,7 +23,7 @@ function %I64() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: ; Integer Constants. @@ -708,117 +708,117 @@ ebb0: istore8_complex v601, v521+v522 ; bin: heap_oob 88 0c 18 ; asm: testq %rcx, %rcx - ; asm: je ebb1 - brz v1, ebb1 ; bin: 48 85 c9 74 1b - fallthrough ebb3 + ; asm: je block1 + brz v1, block1 ; bin: 48 85 c9 74 1b + fallthrough block3 -ebb3: +block3: ; asm: testq %rsi, %rsi - ; asm: je ebb1 - brz v2, ebb1 ; bin: 48 85 f6 74 16 - fallthrough ebb4 + ; asm: je block1 + brz v2, block1 ; bin: 48 85 f6 74 16 + fallthrough block4 -ebb4: +block4: ; asm: testq %r10, %r10 - ; asm: je ebb1 - brz v3, ebb1 ; bin: 4d 85 d2 74 11 - fallthrough ebb5 + ; asm: je block1 + brz v3, block1 ; bin: 4d 85 d2 74 11 + fallthrough block5 -ebb5: +block5: ; asm: testq %rcx, %rcx - ; asm: jne ebb1 - brnz v1, ebb1 ; bin: 48 85 c9 75 0c - fallthrough ebb6 + ; asm: jne block1 + brnz v1, block1 ; bin: 48 85 c9 75 0c + fallthrough block6 -ebb6: +block6: ; asm: testq %rsi, %rsi - ; asm: jne ebb1 - brnz v2, ebb1 ; bin: 48 85 f6 75 07 - fallthrough ebb7 + ; asm: jne block1 + brnz v2, block1 ; bin: 48 85 f6 75 07 + fallthrough block7 -ebb7: +block7: ; asm: testq %r10, %r10 - ; asm: jne ebb1 - brnz v3, ebb1 ; bin: 4d 85 d2 75 02 + ; asm: jne block1 + brnz v3, block1 ; bin: 4d 85 d2 75 02 - ; asm: jmp ebb2 - jump ebb2 ; bin: eb 01 + ; asm: jmp block2 + jump block2 ; bin: eb 01 - ; asm: ebb1: -ebb1: + ; asm: block1: +block1: return ; bin: c3 - ; asm: ebb2: -ebb2: + ; asm: block2: +block2: ; Add a no-op instruction to prevent fold_redundant_jump from removing this block. ; asm: notq %rcx [-,%rcx] v5000 = bnot v1 ; bin: 48 f7 d1 - jump ebb1 ; bin: eb fa + jump block1 ; bin: eb fa } ; CPU flag instructions. 
function %cpu_flags_I64() { -ebb0: +block0: [-,%rcx] v1 = iconst.i64 1 [-,%r10] v2 = iconst.i64 2 - jump ebb1 + jump block1 -ebb1: +block1: ; asm: cmpq %r10, %rcx [-,%rflags] v10 = ifcmp v1, v2 ; bin: 4c 39 d1 ; asm: cmpq %rcx, %r10 [-,%rflags] v11 = ifcmp v2, v1 ; bin: 49 39 ca - ; asm: je ebb1 - brif eq v11, ebb1 ; bin: 74 f8 - jump ebb2 + ; asm: je block1 + brif eq v11, block1 ; bin: 74 f8 + jump block2 -ebb2: - ; asm: jne ebb1 - brif ne v11, ebb1 ; bin: 75 f6 - jump ebb3 +block2: + ; asm: jne block1 + brif ne v11, block1 ; bin: 75 f6 + jump block3 -ebb3: - ; asm: jl ebb1 - brif slt v11, ebb1 ; bin: 7c f4 - jump ebb4 +block3: + ; asm: jl block1 + brif slt v11, block1 ; bin: 7c f4 + jump block4 -ebb4: - ; asm: jge ebb1 - brif sge v11, ebb1 ; bin: 7d f2 - jump ebb5 +block4: + ; asm: jge block1 + brif sge v11, block1 ; bin: 7d f2 + jump block5 -ebb5: - ; asm: jg ebb1 - brif sgt v11, ebb1 ; bin: 7f f0 - jump ebb6 +block5: + ; asm: jg block1 + brif sgt v11, block1 ; bin: 7f f0 + jump block6 -ebb6: - ; asm: jle ebb1 - brif sle v11, ebb1 ; bin: 7e ee - jump ebb7 +block6: + ; asm: jle block1 + brif sle v11, block1 ; bin: 7e ee + jump block7 -ebb7: - ; asm: jb ebb1 - brif ult v11, ebb1 ; bin: 72 ec - jump ebb8 +block7: + ; asm: jb block1 + brif ult v11, block1 ; bin: 72 ec + jump block8 -ebb8: - ; asm: jae ebb1 - brif uge v11, ebb1 ; bin: 73 ea - jump ebb9 +block8: + ; asm: jae block1 + brif uge v11, block1 ; bin: 73 ea + jump block9 -ebb9: - ; asm: ja ebb1 - brif ugt v11, ebb1 ; bin: 77 e8 - jump ebb10 +block9: + ; asm: ja block1 + brif ugt v11, block1 ; bin: 77 e8 + jump block10 -ebb10: - ; asm: jbe ebb1 - brif ule v11, ebb1 ; bin: 76 e6 - jump ebb11 +block10: + ; asm: jbe block1 + brif ule v11, block1 ; bin: 76 e6 + jump block11 -ebb11: +block11: ; asm: sete %bl [-,%rbx] v20 = trueif eq v11 ; bin: 0f 94 c3 @@ -896,7 +896,7 @@ function %outargs() { ss1 = outgoing_arg 8, offset 8 ss2 = outgoing_arg 8, offset 0 -ebb0: +block0: [-,%rcx] v1 = iconst.i64 1 ; asm: movq %rcx, 8(%rsp) @@ -922,7 +922,7 @@ function %I32() { ss2 = incoming_arg 1024, offset -2048 ss3 = incoming_arg 8, offset -2056 -ebb0: +block0: ; Integer Constants. 
@@ -1318,58 +1318,58 @@ ebb0: [-,%r10] v533 = ushr_imm v3, 31 ; bin: 41 c1 ea 1f ; asm: testl %ecx, %ecx - ; asm: je ebb1x - brz v1, ebb1 ; bin: 85 c9 74 18 - fallthrough ebb3 + ; asm: je block1x + brz v1, block1 ; bin: 85 c9 74 18 + fallthrough block3 -ebb3: +block3: ; asm: testl %esi, %esi - ; asm: je ebb1x - brz v2, ebb1 ; bin: 85 f6 74 14 - fallthrough ebb4 + ; asm: je block1x + brz v2, block1 ; bin: 85 f6 74 14 + fallthrough block4 -ebb4: +block4: ; asm: testl %r10d, %r10d - ; asm: je ebb1x - brz v3, ebb1 ; bin: 45 85 d2 74 0f - fallthrough ebb5 + ; asm: je block1x + brz v3, block1 ; bin: 45 85 d2 74 0f + fallthrough block5 -ebb5: +block5: ; asm: testl %ecx, %ecx - ; asm: jne ebb1x - brnz v1, ebb1 ; bin: 85 c9 75 0b - fallthrough ebb6 + ; asm: jne block1x + brnz v1, block1 ; bin: 85 c9 75 0b + fallthrough block6 -ebb6: +block6: ; asm: testl %esi, %esi - ; asm: jne ebb1x - brnz v2, ebb1 ; bin: 85 f6 75 07 - fallthrough ebb7 + ; asm: jne block1x + brnz v2, block1 ; bin: 85 f6 75 07 + fallthrough block7 -ebb7: +block7: ; asm: testl %r10d, %r10d - ; asm: jne ebb1x - brnz v3, ebb1 ; bin: 45 85 d2 75 02 + ; asm: jne block1x + brnz v3, block1 ; bin: 45 85 d2 75 02 - ; asm: jmp ebb2x - jump ebb2 ; bin: eb 01 + ; asm: jmp block2x + jump block2 ; bin: eb 01 - ; asm: ebb1x: -ebb1: + ; asm: block1x: +block1: return ; bin: c3 - ; asm: ebb2x: -ebb2: + ; asm: block2x: +block2: ; Add a no-op instruction to prevent fold_redundant_jump from removing this block. ; asm: notl %ecx [-,%rcx] v5000 = bnot v1 ; bin: f7 d1 - jump ebb1 ; bin: eb fb + jump block1 ; bin: eb fb } ; Tests for i32/i8 conversion instructions. function %I32_I8() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rsi] v2 = iconst.i32 2 [-,%r10] v3 = iconst.i32 3 @@ -1397,7 +1397,7 @@ ebb0: ; Tests for i32/i16 conversion instructions. function %I32_I16() { -ebb0: +block0: [-,%rcx] v1 = iconst.i32 1 [-,%rsi] v2 = iconst.i32 2 [-,%r10] v3 = iconst.i32 3 @@ -1425,7 +1425,7 @@ ebb0: ; Tests for i64/i8 conversion instructions. function %I64_I8() { -ebb0: +block0: [-,%rcx] v1 = iconst.i64 1 [-,%rsi] v2 = iconst.i64 2 [-,%r10] v3 = iconst.i64 3 @@ -1453,7 +1453,7 @@ ebb0: ; Tests for i64/i16 conversion instructions. function %I64_I16() { -ebb0: +block0: [-,%rcx] v1 = iconst.i64 1 [-,%rsi] v2 = iconst.i64 2 [-,%r10] v3 = iconst.i64 3 @@ -1481,7 +1481,7 @@ ebb0: ; Tests for i64/i32 conversion instructions. function %I64_I32() { -ebb0: +block0: [-,%rcx] v1 = iconst.i64 1 [-,%rsi] v2 = iconst.i64 2 [-,%r10] v3 = iconst.i64 3 @@ -1509,9 +1509,9 @@ ebb0: ; Tests for i64 jump table instructions. function %I64_JT(i64 [%rdi]) { - jt0 = jump_table [ebb1, ebb2, ebb3] + jt0 = jump_table [block1, block2, block3] -ebb0(v0: i64 [%rdi]): +block0(v0: i64 [%rdi]): ; Note: The next two lines will need to change whenever instructions are ; added or removed from this test. 
[-, %rax] v1 = jump_table_base.i64 jt0 ; bin: 48 8d 05 00000039 PCRelRodata4(jt0) @@ -1530,23 +1530,23 @@ ebb0(v0: i64 [%rdi]): [-, %r10] v32 = jump_table_entry.i64 v11, v1, 4, jt0 ; bin: 4e 63 14 a8 [-, %r10] v33 = jump_table_entry.i64 v11, v2, 4, jt0 ; bin: 4f 63 14 aa - fallthrough ebb10 + fallthrough block10 -ebb10: +block10: indirect_jump_table_br v10, jt0 ; bin: ff e3 -ebb11: +block11: indirect_jump_table_br v11, jt0 ; bin: 41 ff e5 -ebb1: - fallthrough ebb2 -ebb2: - fallthrough ebb3 -ebb3: +block1: + fallthrough block2 +block2: + fallthrough block3 +block3: trap user0 } function %r12_r13_loads() { -ebb0: +block0: [-,%r12] v1 = iconst.i64 0x0123_4567_89ab_cdef [-,%r13] v2 = iconst.i64 0xfedc_ba98_7654_3210 [-,%rax] v3 = iconst.i64 0x1 @@ -1603,7 +1603,7 @@ ebb0: } function %r12_r13_stores() { -ebb0: +block0: [-,%r12] v1 = iconst.i64 0x0123_4567_89ab_cdef [-,%r13] v2 = iconst.i64 0xfedc_ba98_7654_3210 [-,%rax] v3 = iconst.i64 0x1 @@ -1674,7 +1674,7 @@ ebb0: } function %B64() { -ebb0: +block0: [-, %rax] v1 = bconst.b64 true ; bin: 40 b8 00000001 [-, %r10] v0 = bconst.b64 true ; bin: 41 ba 00000001 return diff --git a/cranelift/filetests/filetests/isa/x86/bitrev-i128-run.clif b/cranelift/filetests/filetests/isa/x86/bitrev-i128-run.clif index effe502d9e..4d3fe2ef57 100644 --- a/cranelift/filetests/filetests/isa/x86/bitrev-i128-run.clif +++ b/cranelift/filetests/filetests/isa/x86/bitrev-i128-run.clif @@ -2,7 +2,7 @@ test run target x86_64 function %reverse_bits_zero() -> b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconcat v0, v0 v2 = bitrev.i128 v1 @@ -12,7 +12,7 @@ ebb0: ; run function %reverse_bits_one() -> b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i64 1 v2 = iconcat v0, v1 @@ -29,7 +29,7 @@ ebb0: ; run function %reverse_bits() -> b1 { -ebb0: +block0: v0 = iconst.i64 0x06AD_8667_69EC_41BA v1 = iconst.i64 0x6C83_D81A_6E28_83AB v2 = iconcat v0, v1 diff --git a/cranelift/filetests/filetests/isa/x86/br-i128-run.clif b/cranelift/filetests/filetests/isa/x86/br-i128-run.clif index bde3238462..95a1de81cf 100644 --- a/cranelift/filetests/filetests/isa/x86/br-i128-run.clif +++ b/cranelift/filetests/filetests/isa/x86/br-i128-run.clif @@ -2,36 +2,36 @@ test run target x86_64 function %br_false() -> b1 { -ebb0: +block0: v10 = iconst.i64 0x42 v11 = iconst.i64 0x00 v0 = iconcat v10, v11 - brz v0, ebb2 - jump ebb1 + brz v0, block2 + jump block1 -ebb1: +block1: v1 = bconst.b1 true return v1 -ebb2: +block2: v2 = bconst.b1 false return v2 } ; run function %br_true() -> b1 { -ebb0: +block0: v10 = iconst.i64 0x00 v11 = iconst.i64 0x00 v0 = iconcat v10, v11 - brz v0, ebb2 - jump ebb1 + brz v0, block2 + jump block1 -ebb1: +block1: v1 = bconst.b1 false return v1 -ebb2: +block2: v2 = bconst.b1 true return v2 } diff --git a/cranelift/filetests/filetests/isa/x86/br-i128.clif b/cranelift/filetests/filetests/isa/x86/br-i128.clif index 442be8d6c8..a1778f4cf5 100644 --- a/cranelift/filetests/filetests/isa/x86/br-i128.clif +++ b/cranelift/filetests/filetests/isa/x86/br-i128.clif @@ -2,41 +2,41 @@ test compile target x86_64 function u0:0(i128) -> i8 fast { -ebb0(v0: i128): - brz v0, ebb2 +block0(v0: i128): + brz v0, block2 ; check: v0 = iconcat v3, v4 ; nextln: v5 = icmp_imm eq v3, 0 ; nextln: v6 = icmp_imm eq v4, 0 ; nextln: v7 = band v5, v6 - ; nextln: brnz v7, ebb2 - jump ebb1 + ; nextln: brnz v7, block2 + jump block1 -ebb1: +block1: v1 = iconst.i8 0 return v1 -ebb2: +block2: v2 = iconst.i8 1 return v2 } function u0:1(i128) -> i8 fast { -ebb0(v0: i128): - brnz v0, ebb2 +block0(v0: i128): + brnz v0, block2 ; 
check: v0 = iconcat v3, v4 - ; nextln: brnz v3, ebb2 - ; nextln: fallthrough ebb3 + ; nextln: brnz v3, block2 + ; nextln: fallthrough block3 - ; check: ebb3: - ; nextln: brnz.i64 v4, ebb2 - jump ebb1 - ; nextln: fallthrough ebb1 + ; check: block3: + ; nextln: brnz.i64 v4, block2 + jump block1 + ; nextln: fallthrough block1 -ebb1: +block1: v1 = iconst.i8 0 return v1 -ebb2: +block2: v2 = iconst.i8 1 return v2 } diff --git a/cranelift/filetests/filetests/isa/x86/brz-i8-run.clif b/cranelift/filetests/filetests/isa/x86/brz-i8-run.clif index c050a91425..c8520830e6 100644 --- a/cranelift/filetests/filetests/isa/x86/brz-i8-run.clif +++ b/cranelift/filetests/filetests/isa/x86/brz-i8-run.clif @@ -2,32 +2,32 @@ test run target x86_64 function u0:0() -> b1 { -ebb0: +block0: v0 = iconst.i8 0 - brz v0, ebb1 - jump ebb2 + brz v0, block1 + jump block2 -ebb1: +block1: v1 = bconst.b1 true return v1 -ebb2: +block2: v2 = bconst.b1 false return v2 } ; run function u0:1() -> b1 { -ebb0: +block0: v0 = iconst.i8 0 - brnz v0, ebb1 - jump ebb2 + brnz v0, block1 + jump block2 -ebb1: +block1: v1 = bconst.b1 false return v1 -ebb2: +block2: v2 = bconst.b1 true return v2 } diff --git a/cranelift/filetests/filetests/isa/x86/brz-i8.clif b/cranelift/filetests/filetests/isa/x86/brz-i8.clif index a5cc03c985..6c2f95c359 100644 --- a/cranelift/filetests/filetests/isa/x86/brz-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/brz-i8.clif @@ -2,37 +2,37 @@ test compile target x86_64 function u0:0() -> b1 { -ebb0: +block0: v0 = iconst.i8 0 ; check: v0 = iconst.i8 0 - brz v0, ebb1 + brz v0, block1 ; nextln: v3 = uextend.i32 v0 - ; nextln: brz v3, ebb1 - jump ebb2 + ; nextln: brz v3, block1 + jump block2 -ebb1: +block1: v1 = bconst.b1 true return v1 -ebb2: +block2: v2 = bconst.b1 false return v2 } function u0:1() -> b1 { -ebb0: +block0: v0 = iconst.i8 0 ; check: v0 = iconst.i8 0 - brnz v0, ebb1 + brnz v0, block1 ; nextln: v3 = uextend.i32 v0 - ; nextln: brnz v3, ebb1 - jump ebb2 + ; nextln: brnz v3, block1 + jump block2 -ebb1: +block1: v1 = bconst.b1 false return v1 -ebb2: +block2: v2 = bconst.b1 true return v2 } diff --git a/cranelift/filetests/filetests/isa/x86/compile-vconst.clif b/cranelift/filetests/filetests/isa/x86/compile-vconst.clif index ee6dff07db..f2cb9259e6 100644 --- a/cranelift/filetests/filetests/isa/x86/compile-vconst.clif +++ b/cranelift/filetests/filetests/isa/x86/compile-vconst.clif @@ -5,12 +5,12 @@ target x86_64 haswell ; use baldrdash calling convention here for simplicity (avoids prologue, epilogue) function %test_vconst_i32() -> i32x4 baldrdash_system_v { -ebb0: +block0: v0 = vconst.i32x4 0x1234 return v0 } -; check: ebb0: +; check: block0: ; nextln: v0 = vconst.i32x4 0x1234 ; nextln: return v0 ; nextln: } diff --git a/cranelift/filetests/filetests/isa/x86/extend-i128-run.clif b/cranelift/filetests/filetests/isa/x86/extend-i128-run.clif index c2550b8f4e..3626e5ebf4 100644 --- a/cranelift/filetests/filetests/isa/x86/extend-i128-run.clif +++ b/cranelift/filetests/filetests/isa/x86/extend-i128-run.clif @@ -2,7 +2,7 @@ test run target x86_64 function u0:0() -> b1 { -ebb0: +block0: v0 = iconst.i64 0xffff_ffff_eeee_0000 v1 = uextend.i128 v0 v2, v3 = isplit v1 @@ -14,7 +14,7 @@ ebb0: ; run function u0:1() -> b1 { -ebb0: +block0: v0 = iconst.i64 0xffff_ffff_eeee_0000 v1 = sextend.i128 v0 v2, v3 = isplit v1 diff --git a/cranelift/filetests/filetests/isa/x86/extend-i128.clif b/cranelift/filetests/filetests/isa/x86/extend-i128.clif index 0d9e4c8aa9..db2b53276a 100644 --- 
a/cranelift/filetests/filetests/isa/x86/extend-i128.clif +++ b/cranelift/filetests/filetests/isa/x86/extend-i128.clif @@ -2,7 +2,7 @@ test compile target x86_64 function u0:0() -> b1 { -ebb0: +block0: v0 = iconst.i64 0xffff_ffff_eeee_0000 ; check: v0 = iconst.i64 0xffff_ffff_eeee_0000 ; nextln: v2 -> v0 @@ -20,7 +20,7 @@ ebb0: } function u0:1() -> b1 { -ebb0: +block0: v0 = iconst.i64 0xffff_ffff_eeee_0000 ; check: v0 = iconst.i64 0xffff_ffff_eeee_0000 ; nextln: v2 -> v0 diff --git a/cranelift/filetests/filetests/isa/x86/extractlane-binemit.clif b/cranelift/filetests/filetests/isa/x86/extractlane-binemit.clif index d1478b99d0..84140a23bd 100644 --- a/cranelift/filetests/filetests/isa/x86/extractlane-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/extractlane-binemit.clif @@ -6,7 +6,7 @@ target x86_64 haswell ; booleans use x86_pextr which is manually placed in the IR so that it can be binemit-tested function %test_extractlane_b8() { -ebb0: +block0: [-, %rax] v0 = bconst.b8 true [-, %xmm0] v1 = splat.b8x16 v0 [-, %rax] v2 = x86_pextr v1, 10 ; bin: 66 0f 3a 14 c0 0a @@ -14,7 +14,7 @@ ebb0: } function %test_extractlane_i16() { -ebb0: +block0: [-, %rax] v0 = iconst.i16 4 [-, %xmm1] v1 = splat.i16x8 v0 [-, %rax] v2 = x86_pextr v1, 4 ; bin: 66 0f 3a 15 c8 04 @@ -22,7 +22,7 @@ ebb0: } function %test_extractlane_i32() { -ebb0: +block0: [-, %rax] v0 = iconst.i32 42 [-, %xmm4] v1 = splat.i32x4 v0 [-, %rcx] v2 = x86_pextr v1, 2 ; bin: 66 0f 3a 16 e1 02 @@ -30,7 +30,7 @@ ebb0: } function %test_extractlane_b64() { -ebb0: +block0: [-, %rax] v0 = bconst.b64 false [-, %xmm2] v1 = splat.b64x2 v0 [-, %rbx] v2 = x86_pextr v1, 1 ; bin: 66 48 0f 3a 16 d3 01 diff --git a/cranelift/filetests/filetests/isa/x86/extractlane-run.clif b/cranelift/filetests/filetests/isa/x86/extractlane-run.clif index adb2e7b8e6..4e1d735bfe 100644 --- a/cranelift/filetests/filetests/isa/x86/extractlane-run.clif +++ b/cranelift/filetests/filetests/isa/x86/extractlane-run.clif @@ -2,7 +2,7 @@ test run set enable_simd function %test_extractlane_b8() -> b8 { -ebb0: +block0: v1 = vconst.b8x16 [false false false false false false false false false false true false false false false false] v2 = extractlane v1, 10 @@ -11,7 +11,7 @@ ebb0: ; run function %test_extractlane_i16() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 0x00080007000600050004000300020001 v1 = extractlane v0, 1 v2 = icmp_imm eq v1, 2 @@ -20,7 +20,7 @@ ebb0: ; run function %test_extractlane_f32() -> b1 { -ebb0: +block0: v0 = f32const 0x42.42 v1 = vconst.f32x4 [0x00.00 0x00.00 0x00.00 0x42.42] v2 = extractlane v1, 3 @@ -30,7 +30,7 @@ ebb0: ; run function %test_extractlane_i32_with_vector_reuse() -> b1 { -ebb0: +block0: v0 = iconst.i32 42 v1 = iconst.i32 99 @@ -49,7 +49,7 @@ ebb0: ; run function %test_extractlane_f32_with_vector_reuse() -> b1 { -ebb0: +block0: v0 = f32const 0x42.42 v1 = f32const 0x99.99 diff --git a/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants-32bit.clif b/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants-32bit.clif index 8021375558..4d736287e0 100644 --- a/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants-32bit.clif +++ b/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants-32bit.clif @@ -3,14 +3,14 @@ test binemit target i686 function %foo() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm0, %xmm0 [-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0 return v0 } function %bar() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm0, %xmm0 [-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0 return v1 diff 
--git a/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants.clif b/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants.clif index 049320870e..25cd686996 100644 --- a/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants.clif +++ b/cranelift/filetests/filetests/isa/x86/floating-point-zero-constants.clif @@ -3,28 +3,28 @@ test binemit target x86_64 function %zero_const_32bit_no_rex() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm0, %xmm0 [-,%xmm0] v0 = f32const 0.0 ; bin: 40 0f 57 c0 return v0 } function %zero_const_32bit_rex() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm8, %xmm8 [-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0 return v1 } function %zero_const_64bit_no_rex() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm0, %xmm0 [-,%xmm0] v0 = f64const 0.0 ; bin: 66 40 0f 57 c0 return v0 } function %zero_const_64bit_rex() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm8, %xmm8 [-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0 return v1 diff --git a/cranelift/filetests/filetests/isa/x86/i128-isplit-forward-jump.clif b/cranelift/filetests/filetests/isa/x86/i128-isplit-forward-jump.clif index b5144203ac..493d2e6365 100644 --- a/cranelift/filetests/filetests/isa/x86/i128-isplit-forward-jump.clif +++ b/cranelift/filetests/filetests/isa/x86/i128-isplit-forward-jump.clif @@ -2,24 +2,24 @@ test compile target x86_64 function u0:0() -> i128 system_v { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i64 0 v2 = iconcat v0, v1 - jump ebb5 + jump block5 -ebb2: - jump ebb4(v27) +block2: + jump block4(v27) -ebb4(v23: i128): +block4(v23: i128): return v23 -ebb5: +block5: v27 = bxor.i128 v2, v2 v32 = iconst.i32 0 - brz v32, ebb2 - jump ebb6 + brz v32, block2 + jump block6 -ebb6: +block6: trap user0 } diff --git a/cranelift/filetests/filetests/isa/x86/i128.clif b/cranelift/filetests/filetests/isa/x86/i128.clif index b710a7430e..028fb6e551 100644 --- a/cranelift/filetests/filetests/isa/x86/i128.clif +++ b/cranelift/filetests/filetests/isa/x86/i128.clif @@ -2,8 +2,8 @@ test compile target x86_64 function u0:0(i64, i64) -> i128 fast { -ebb0(v0: i64, v1: i64): -;check: ebb0(v0: i64 [%rdi], v1: i64 [%rsi], v3: i64 [%rbp]): +block0(v0: i64, v1: i64): +;check: block0(v0: i64 [%rdi], v1: i64 [%rsi], v3: i64 [%rbp]): v2 = iconcat.i64 v0, v1 ; check: regmove v0, %rdi -> %rax @@ -15,8 +15,8 @@ ebb0(v0: i64, v1: i64): } function u0:1(i128) -> i64, i64 fast { -ebb0(v0: i128): -; check: ebb0(v3: i64 [%rdi], v4: i64 [%rsi], v5: i64 [%rbp]): +block0(v0: i128): +; check: block0(v3: i64 [%rdi], v4: i64 [%rsi], v5: i64 [%rbp]): v1, v2 = isplit v0 ; check: regmove v3, %rdi -> %rax @@ -28,8 +28,8 @@ ebb0(v0: i128): } function u0:2(i64, i128) fast { -; check: ebb0(v0: i64 [%rdi], v2: i64 [%rsi], v3: i64 [%rdx], v6: i64 [%rbp]): -ebb0(v0: i64, v1: i128): +; check: block0(v0: i64 [%rdi], v2: i64 [%rsi], v3: i64 [%rdx], v6: i64 [%rbp]): +block0(v0: i64, v1: i128): ; check: store v2, v0+8 ; check: store v3, v0+16 store v1, v0+8 @@ -37,7 +37,7 @@ ebb0(v0: i64, v1: i128): } function u0:3(i64) -> i128 fast { -ebb0(v0: i64): +block0(v0: i64): ; check: v2 = load.i64 v0+8 ; check: v3 = load.i64 v0+16 v1 = load.i128 v0+8 diff --git a/cranelift/filetests/filetests/isa/x86/icmp-compile.clif b/cranelift/filetests/filetests/isa/x86/icmp-compile.clif index cf9cb3ff07..4a4ac0fc59 100644 --- a/cranelift/filetests/filetests/isa/x86/icmp-compile.clif +++ b/cranelift/filetests/filetests/isa/x86/icmp-compile.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %icmp_i8x16() { -ebb0: +block0: 
[-, %xmm3] v0 = vconst.i8x16 0x00 ; bin: 66 0f ef db [-, %xmm4] v1 = vconst.i8x16 0xffffffffffffffffffffffffffffffff ; bin: 66 0f 74 e4 [-, %xmm3] v2 = icmp eq v0, v1 ; bin: 66 0f 74 dc @@ -11,7 +11,7 @@ ebb0: } function %icmp_i16x8() { -ebb0: +block0: [-, %xmm0] v0 = vconst.i16x8 0x00 [-, %xmm7] v1 = vconst.i16x8 0xffffffffffffffffffffffffffffffff [-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 75 c7 @@ -19,7 +19,7 @@ ebb0: } function %icmp_i32x4() { -ebb0: +block0: [-, %xmm0] v0 = vconst.i32x4 0x00 [-, %xmm4] v1 = vconst.i32x4 0xffffffffffffffffffffffffffffffff [-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 76 c4 @@ -27,7 +27,7 @@ ebb0: } function %icmp_i64x2() { -ebb0: +block0: [-, %xmm0] v0 = vconst.i64x2 0x00 [-, %xmm1] v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff [-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 38 29 c1 diff --git a/cranelift/filetests/filetests/isa/x86/icmp-i128.clif b/cranelift/filetests/filetests/isa/x86/icmp-i128.clif index 8816d22d69..dce0e1db87 100644 --- a/cranelift/filetests/filetests/isa/x86/icmp-i128.clif +++ b/cranelift/filetests/filetests/isa/x86/icmp-i128.clif @@ -2,7 +2,7 @@ test run target x86_64 haswell function %test_icmp_eq_i128() -> b1 { -ebb0: +block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 v1 = iconcat v11, v12 @@ -16,7 +16,7 @@ ebb0: ; run function %test_icmp_imm_eq_i128() -> b1 { -ebb0: +block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 v1 = iconcat v11, v12 @@ -27,7 +27,7 @@ ebb0: ; run function %test_icmp_ne_i128() -> b1 { -ebb0: +block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 v1 = iconcat v11, v12 @@ -41,7 +41,7 @@ ebb0: ; run function %test_icmp_imm_ne_i128() -> b1 { -ebb0: +block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 v1 = iconcat v11, v12 diff --git a/cranelift/filetests/filetests/isa/x86/icmp-run.clif b/cranelift/filetests/filetests/isa/x86/icmp-run.clif index c470af662a..0820cac013 100644 --- a/cranelift/filetests/filetests/isa/x86/icmp-run.clif +++ b/cranelift/filetests/filetests/isa/x86/icmp-run.clif @@ -2,7 +2,7 @@ test run set enable_simd function %run_icmp_i8x16() -> b8 { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = vconst.i8x16 0x00 v2 = icmp eq v0, v1 @@ -13,7 +13,7 @@ ebb0: ; run function %run_icmp_i64x2() -> b64 { -ebb0: +block0: v0 = vconst.i64x2 0xffffffffffffffffffffffffffffffff v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff v2 = icmp eq v0, v1 diff --git a/cranelift/filetests/filetests/isa/x86/imul-i128.clif b/cranelift/filetests/filetests/isa/x86/imul-i128.clif index 2d683a32dd..65d21463fd 100644 --- a/cranelift/filetests/filetests/isa/x86/imul-i128.clif +++ b/cranelift/filetests/filetests/isa/x86/imul-i128.clif @@ -2,7 +2,7 @@ test run target x86_64 haswell function %test_imul_i128() -> b1 { -ebb0: +block0: v11 = iconst.i64 0xf2347ac4503f1e24 v12 = iconst.i64 0x0098fe985354ab06 v1 = iconcat v11, v12 diff --git a/cranelift/filetests/filetests/isa/x86/insertlane-binemit.clif b/cranelift/filetests/filetests/isa/x86/insertlane-binemit.clif index c388ed6fae..4be35a47b3 100644 --- a/cranelift/filetests/filetests/isa/x86/insertlane-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/insertlane-binemit.clif @@ -6,7 +6,7 @@ target x86_64 haswell ; booleans use x86_pinsr which is manually placed in the IR so that it can be binemit-tested function %test_insertlane_b8() { -ebb0: +block0: [-, %rax] v0 = bconst.b8 true [-, %rbx] v1 = bconst.b8 false [-, %xmm0] v2 = splat.b8x16 v0 @@ -15,7 +15,7 @@ ebb0: } function %test_insertlane_i16() { -ebb0: +block0: [-, %rax] v0 = iconst.i16 4 [-, %rbx] v1 = iconst.i16 5 [-, 
%xmm1] v2 = splat.i16x8 v0 @@ -24,7 +24,7 @@ ebb0: } function %test_insertlane_i32() { -ebb0: +block0: [-, %rax] v0 = iconst.i32 42 [-, %rbx] v1 = iconst.i32 99 [-, %xmm4] v2 = splat.i32x4 v0 @@ -33,7 +33,7 @@ ebb0: } function %test_insertlane_b64() { -ebb0: +block0: [-, %rax] v0 = bconst.b64 true [-, %rbx] v1 = bconst.b64 false [-, %xmm2] v2 = splat.b64x2 v0 diff --git a/cranelift/filetests/filetests/isa/x86/insertlane-run.clif b/cranelift/filetests/filetests/isa/x86/insertlane-run.clif index 92fb38202e..8f1cd7ef46 100644 --- a/cranelift/filetests/filetests/isa/x86/insertlane-run.clif +++ b/cranelift/filetests/filetests/isa/x86/insertlane-run.clif @@ -4,7 +4,7 @@ set enable_simd ; TODO once SIMD vector comparison is implemented, remove use of extractlane below function %test_insertlane_b8() -> b8 { -ebb0: +block0: v1 = bconst.b8 true v2 = vconst.b8x16 [false false false false false false false false false false false false false false false false] @@ -15,7 +15,7 @@ ebb0: ; run function %test_insertlane_f32() -> b1 { -ebb0: +block0: v0 = f32const 0x42.42 v1 = vconst.f32x4 0x00 v2 = insertlane v1, 1, v0 @@ -26,7 +26,7 @@ ebb0: ; run function %test_insertlane_f64_lane1() -> b1 { -ebb0: +block0: v0 = f64const 0x42.42 v1 = vconst.f64x2 0x00 v2 = insertlane v1, 1, v0 @@ -37,7 +37,7 @@ ebb0: ; run function %test_insertlane_f64_lane0() -> b1 { -ebb0: +block0: v0 = f64const 0x42.42 v1 = vconst.f64x2 0x00 v2 = insertlane v1, 0, v0 diff --git a/cranelift/filetests/filetests/isa/x86/ireduce-i16-to-i8.clif b/cranelift/filetests/filetests/isa/x86/ireduce-i16-to-i8.clif index 0f8303dfc4..2a283af485 100644 --- a/cranelift/filetests/filetests/isa/x86/ireduce-i16-to-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/ireduce-i16-to-i8.clif @@ -2,7 +2,7 @@ test compile target x86_64 function u0:0(i16) -> i8 fast { -ebb0(v0: i16): +block0(v0: i16): v1 = ireduce.i8 v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x86/isplit-not-legalized-twice.clif b/cranelift/filetests/filetests/isa/x86/isplit-not-legalized-twice.clif index 4b81a186da..c3ace05158 100644 --- a/cranelift/filetests/filetests/isa/x86/isplit-not-legalized-twice.clif +++ b/cranelift/filetests/filetests/isa/x86/isplit-not-legalized-twice.clif @@ -2,10 +2,10 @@ test compile target x86_64 function u0:0(i64, i64) -> i128 system_v { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): trap user0 -ebb30: +block30: v245 = iconst.i64 0 v246 = iconcat v245, v245 ; The next instruction used to be legalized twice, causing a panic the second time. 
@@ -13,7 +13,7 @@ ebb30: v252, v253 = isplit v246 trap user0 -ebb45: +block45: v369 = iconst.i64 0 v370 = load.i128 v369 trap user0 diff --git a/cranelift/filetests/filetests/isa/x86/isub_imm-i8.clif b/cranelift/filetests/filetests/isa/x86/isub_imm-i8.clif index 35698c9abc..018ac95fbc 100644 --- a/cranelift/filetests/filetests/isa/x86/isub_imm-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/isub_imm-i8.clif @@ -3,7 +3,7 @@ set opt_level=speed_and_size target x86_64 function u0:0(i8) -> i8 fast { -ebb0(v0: i8): +block0(v0: i8): v1 = iconst.i8 0 v2 = isub v1, v0 ; check: v3 = uextend.i32 v0 diff --git a/cranelift/filetests/filetests/isa/x86/jump_i128_param_unused.clif b/cranelift/filetests/filetests/isa/x86/jump_i128_param_unused.clif index 9d96fcbe31..19f22c3906 100644 --- a/cranelift/filetests/filetests/isa/x86/jump_i128_param_unused.clif +++ b/cranelift/filetests/filetests/isa/x86/jump_i128_param_unused.clif @@ -2,9 +2,9 @@ test compile target x86_64 function u0:0(i128) system_v { -ebb0(v0: i128): - jump ebb1(v0) +block0(v0: i128): + jump block1(v0) -ebb1(v1: i128): +block1(v1: i128): return } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-bint-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-bint-i8.clif index b2684ae105..dec3416a89 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-bint-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-bint-i8.clif @@ -3,7 +3,7 @@ test compile target x86_64 function u0:0() -> i8 fast { -ebb0: +block0: v14 = bconst.b1 false v15 = bint.i8 v14 return v15 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-bnot.clif b/cranelift/filetests/filetests/isa/x86/legalize-bnot.clif index 07a41dfdc4..dbd1397e45 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-bnot.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-bnot.clif @@ -8,15 +8,15 @@ function u0:51(i64, i64) system_v { ss2 = explicit_slot 1 ss3 = explicit_slot 1 -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = stack_addr.i64 ss1 v3 = load.i8 v1 store v3, v2 v4 = stack_addr.i64 ss2 v5 = stack_addr.i64 ss3 - jump ebb1 + jump block1 -ebb1: +block1: v6 = load.i8 v2 store v6, v5 v7 = load.i8 v5 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-br-icmp.clif b/cranelift/filetests/filetests/isa/x86/legalize-br-icmp.clif index 2f8ce5d78a..5c4004a539 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-br-icmp.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-br-icmp.clif @@ -3,44 +3,44 @@ test legalizer target x86_64 function %br_icmp(i64) fast { -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i64 0 - br_icmp eq v0, v1, ebb1 - jump ebb1 + br_icmp eq v0, v1, block1 + jump block1 -ebb1: +block1: return } ; sameln: function %br_icmp(i64 [%rdi]) fast { -; nextln: ebb0(v0: i64): +; nextln: block0(v0: i64): ; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0 ; nextln: [DynRexOp1icscc#8039] v2 = icmp eq v0, v1 -; nextln: [RexOp1t8jccb#75] brnz v2, ebb1 -; nextln: [Op1jmpb#eb] jump ebb1 +; nextln: [RexOp1t8jccb#75] brnz v2, block1 +; nextln: [Op1jmpb#eb] jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: [Op1ret#c3] return ; nextln: } -function %br_icmp_ebb_args(i64) fast { -ebb0(v0: i64): +function %br_icmp_args(i64) fast { +block0(v0: i64): v1 = iconst.i64 0 - br_icmp eq v0, v1, ebb1(v0) - jump ebb1(v0) + br_icmp eq v0, v1, block1(v0) + jump block1(v0) -ebb1(v2: i64): +block1(v2: i64): return } -; sameln: function %br_icmp_ebb_args(i64 [%rdi]) fast { -; nextln: ebb0(v0: i64): +; sameln: function %br_icmp_args(i64 
[%rdi]) fast { +; nextln: block0(v0: i64): ; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0 ; nextln: [DynRexOp1icscc#8039] v3 = icmp eq v0, v1 -; nextln: [RexOp1t8jccb#75] brnz v3, ebb1(v0) -; nextln: [Op1jmpb#eb] jump ebb1(v0) +; nextln: [RexOp1t8jccb#75] brnz v3, block1(v0) +; nextln: [Op1jmpb#eb] jump block1(v0) ; nextln: -; nextln: ebb1(v2: i64): +; nextln: block1(v2: i64): ; nextln: [Op1ret#c3] return ; nextln: } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-br-table.clif b/cranelift/filetests/filetests/isa/x86/legalize-br-table.clif index e9464e6219..b9ed036755 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-br-table.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-br-table.clif @@ -2,20 +2,20 @@ test compile set opt_level=speed_and_size target x86_64 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function u0:0(i64) system_v { ss0 = explicit_slot 1 - jt0 = jump_table [ebb1] + jt0 = jump_table [block1] -ebb0(v0: i64): +block0(v0: i64): v1 = stack_addr.i64 ss0 v2 = load.i8 v1 - br_table v2, ebb2, jt0 + br_table v2, block2, jt0 ; check: $(oob=$V) = ifcmp_imm $(idx=$V), 1 -; ebb2 is replaced by ebb1 by fold_redundant_jump -; nextln: brif uge $oob, ebb1 -; nextln: fallthrough $(inb=$EBB) +; block2 is replaced by block1 by fold_redundant_jump +; nextln: brif uge $oob, block1 +; nextln: fallthrough $(inb=$BB) ; check: $inb: ; nextln: $(final_idx=$V) = uextend.i64 $idx ; nextln: $(base=$V) = jump_table_base.i64 jt0 @@ -23,9 +23,9 @@ ebb0(v0: i64): ; nextln: $(addr=$V) = iadd $base, $rel_addr ; nextln: indirect_jump_table_br $addr, jt0 -ebb2: - jump ebb1 +block2: + jump block1 -ebb1: +block1: return } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-byte-ops-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-byte-ops-i8.clif index b0a318b8d4..2c8c8612d6 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-byte-ops-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-byte-ops-i8.clif @@ -7,7 +7,7 @@ function u0:0(i8, i8) fast { fn0 = %black_box(i8) ss0 = explicit_slot 1 ; black box -ebb0(v0: i8, v1: i8): +block0(v0: i8, v1: i8): v99 = stack_addr.i64 ss0 ; check: istore8 $(V), $(V) diff --git a/cranelift/filetests/filetests/isa/x86/legalize-call.clif b/cranelift/filetests/filetests/isa/x86/legalize-call.clif index 240b075374..c761a8d5aa 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-call.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-call.clif @@ -5,7 +5,7 @@ target x86_64 haswell function %call() { fn0 = %foo() -ebb0: +block0: call fn0() return } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-clz-ctz-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-clz-ctz-i8.clif index 914bcb0e30..8e63f1e0c6 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-clz-ctz-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-clz-ctz-i8.clif @@ -4,7 +4,7 @@ target x86_64 ; regex: V=v\d+ function u0:0(i8) -> i8, i8 fast { -ebb0(v0: i8): +block0(v0: i8): v1 = clz v0 ; check: v3 = uextend.i32 v0 ; nextln: v6 = iconst.i32 -1 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-custom.clif b/cranelift/filetests/filetests/isa/x86/legalize-custom.clif index 2657bfd497..c2bc6bec19 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-custom.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-custom.clif @@ -4,36 +4,36 @@ target i686 target x86_64 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %cond_trap(i32) { -ebb0(v1: i32): +block0(v1: i32): trapz 
v1, user67 return - ; check: ebb0(v1: i32 + ; check: block0(v1: i32 ; nextln: $(f=$V) = ifcmp_imm v1, 0 ; nextln: trapif eq $f, user67 ; nextln: return } function %cond_trap2(i32) { -ebb0(v1: i32): +block0(v1: i32): trapnz v1, int_ovf return - ; check: ebb0(v1: i32 + ; check: block0(v1: i32 ; nextln: $(f=$V) = ifcmp_imm v1, 0 ; nextln: trapif ne $f, int_ovf ; nextln: return } function %cond_trap_b1(i32) { -ebb0(v1: i32): +block0(v1: i32): v2 = icmp_imm eq v1, 6 trapz v2, user7 return - ; check: ebb0(v1: i32 - ; check: brnz v2, $(new=$EBB) - ; check: jump $(trap=$EBB) + ; check: block0(v1: i32 + ; check: brnz v2, $(new=$BB) + ; check: jump $(trap=$BB) ; check: $trap: ; nextln: trap user7 ; check: $new: @@ -41,13 +41,13 @@ ebb0(v1: i32): } function %cond_trap2_b1(i32) { -ebb0(v1: i32): +block0(v1: i32): v2 = icmp_imm eq v1, 6 trapnz v2, user9 return - ; check: ebb0(v1: i32 - ; check: brz v2, $(new=$EBB) - ; check: jump $(trap=$EBB) + ; check: block0(v1: i32 + ; check: brz v2, $(new=$BB) + ; check: jump $(trap=$BB) ; check: $trap: ; nextln: trap user9 ; check: $new: @@ -55,7 +55,7 @@ ebb0(v1: i32): } function %f32const() -> f32 { -ebb0: +block0: v1 = f32const 0x1.0p1 ; check: $(tmp=$V) = iconst.i32 ; check: v1 = bitcast.f32 $tmp @@ -63,9 +63,9 @@ ebb0: } function %select_f64(f64, f64, i32) -> f64 { -ebb0(v0: f64, v1: f64, v2: i32): +block0(v0: f64, v1: f64, v2: i32): v3 = select v2, v0, v1 - ; check: brnz v2, $(new=$EBB)(v0) + ; check: brnz v2, $(new=$BB)(v0) ; nextln: jump $new(v1) ; check: $new(v3: f64): ; nextln: return v3 @@ -73,19 +73,19 @@ ebb0(v0: f64, v1: f64, v2: i32): } function %f32_min(f32, f32) -> f32 { -ebb0(v0: f32, v1: f32): +block0(v0: f32, v1: f32): v2 = fmin v0, v1 return v2 ; check: $(vnat=$V) = x86_fmin.f32 v0, v1 - ; nextln: jump $(done=$EBB)($vnat) + ; nextln: jump $(done=$BB)($vnat) - ; check: $(uno=$EBB): + ; check: $(uno=$BB): ; nextln: $(vuno=$V) = fadd.f32 v0, v1 - ; nextln: jump $(done=$EBB)($vuno) + ; nextln: jump $(done=$BB)($vuno) - ; check: $(ueq=$EBB): + ; check: $(ueq=$BB): ; check: $(veq=$V) = bor.f32 v0, v1 - ; nextln: jump $(done=$EBB)($veq) + ; nextln: jump $(done=$BB)($veq) ; check: $done(v2: f32): ; nextln: return v2 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-div-traps.clif b/cranelift/filetests/filetests/isa/x86/legalize-div-traps.clif index 2622ae48f3..1be81ec186 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-div-traps.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-div-traps.clif @@ -5,11 +5,11 @@ set avoid_div_traps=1 target x86_64 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %udiv(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = udiv v0, v1 ; nextln: $(fz=$V) = ifcmp_imm v1, 0 ; nextln: trapif eq $fz, int_divz @@ -20,8 +20,8 @@ ebb0(v0: i64, v1: i64): } function %udiv_0(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 0 ; nextln: v1 = iconst.i64 0 v2 = udiv v0, v1 @@ -34,8 +34,8 @@ ebb0(v0: i64): } function %udiv_minus_1(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 -1 ; nextln: v1 = iconst.i64 -1 v2 = udiv v0, v1 @@ -46,8 +46,8 @@ ebb0(v0: i64): } function %urem(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = urem v0, v1 ; nextln: $(fz=$V) = ifcmp_imm v1, 0 ; nextln: trapif eq $fz, int_divz @@ -58,8 +58,8 @@ ebb0(v0: i64, v1: i64): } function %urem_0(i64) 
-> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 0 ; nextln: v1 = iconst.i64 0 v2 = urem v0, v1 @@ -72,8 +72,8 @@ ebb0(v0: i64): } function %urem_minus_1(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 -1 ; nextln: v1 = iconst.i64 -1 v2 = urem v0, v1 @@ -84,16 +84,16 @@ ebb0(v0: i64): } function %sdiv(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = sdiv v0, v1 ; nextln: $(fz=$V) = ifcmp_imm v1, 0 ; nextln: trapif eq $fz, int_divz ; nextln: $(fm1=$V) = ifcmp_imm v1, -1 - ; nextln: brif eq $fm1, $(m1=$EBB) + ; nextln: brif eq $fm1, $(m1=$BB) ; check: $(hi=$V) = sshr_imm ; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 - ; nextln: jump $(done=$EBB)($q) + ; nextln: jump $(done=$BB)($q) ; check: $m1: ; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000 ; nextln: $(fm=$V) = ifcmp.i64 v0, $imin @@ -104,8 +104,8 @@ ebb0(v0: i64, v1: i64): } function %sdiv_0(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 0 ; nextln: v1 = iconst.i64 0 v2 = sdiv v0, v1 @@ -118,16 +118,16 @@ ebb0(v0: i64): } function %sdiv_minus_1(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 -1 ; nextln: v1 = iconst.i64 -1 v2 = sdiv v0, v1 ; nextln: $(fm1=$V) = ifcmp_imm v1, -1 - ; nextln: brif eq $fm1, $(m1=$EBB) + ; nextln: brif eq $fm1, $(m1=$BB) ; check: $(hi=$V) = sshr_imm ; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 - ; nextln: jump $(done=$EBB)($q) + ; nextln: jump $(done=$BB)($q) ; check: $m1: ; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000 ; nextln: $(fm=$V) = ifcmp.i64 v0, $imin @@ -140,27 +140,27 @@ ebb0(v0: i64): ; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1. ; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern. 
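; (Worked example of the special case: for i64, INT_MIN is -0x8000_0000_0000_0000, and INT_MIN / -1 would be +2^63, which is unrepresentable, so the hardware divide behind x86_sdivmodx faults; the remainder is well defined, though, since x % -1 == 0 for every x under truncated division, which is why the $m1 arm below can jump straight to $done with a constant 0.)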
function %srem(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = srem v0, v1 ; nextln: $(fz=$V) = ifcmp_imm v1, 0 ; nextln: trapif eq $fz, int_divz ; nextln: $(fm1=$V) = ifcmp_imm v1, -1 - ; nextln: brif eq $fm1, $(m1=$EBB) + ; nextln: brif eq $fm1, $(m1=$BB) ; check: $(hi=$V) = sshr_imm ; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 - ; nextln: jump $(done=$EBB)($r) + ; nextln: jump $(done=$BB)($r) ; check: $m1: ; nextln: $(zero=$V) = iconst.i64 0 - ; nextln: jump $(done=$EBB)($zero) + ; nextln: jump $(done=$BB)($zero) ; check: $done(v2: i64): return v2 ; nextln: return v2 } function %srem_0(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 0 ; nextln: v1 = iconst.i64 0 v2 = srem v0, v1 @@ -173,19 +173,19 @@ ebb0(v0: i64): } function %srem_minus_1(i64) -> i64 { -ebb0(v0: i64): - ; check: ebb0( +block0(v0: i64): + ; check: block0( v1 = iconst.i64 -1 ; nextln: v1 = iconst.i64 -1 v2 = srem v0, v1 ; nextln: $(fm1=$V) = ifcmp_imm v1, -1 - ; nextln: brif eq $fm1, $(m1=$EBB) + ; nextln: brif eq $fm1, $(m1=$BB) ; check: $(hi=$V) = sshr_imm ; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 - ; nextln: jump $(done=$EBB)($r) + ; nextln: jump $(done=$BB)($r) ; check: $m1: ; nextln: $(zero=$V) = iconst.i64 0 - ; nextln: jump $(done=$EBB)($zero) + ; nextln: jump $(done=$BB)($zero) ; check: $done(v2: i64): return v2 ; nextln: return v2 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-div.clif b/cranelift/filetests/filetests/isa/x86/legalize-div.clif index be30accbc6..b9f115b85b 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-div.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-div.clif @@ -5,11 +5,11 @@ set avoid_div_traps=0 target x86_64 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %udiv(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = udiv v0, v1 ; nextln: $(hi=$V) = iconst.i64 0 ; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1 @@ -18,8 +18,8 @@ ebb0(v0: i64, v1: i64): } function %urem(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = urem v0, v1 ; nextln: $(hi=$V) = iconst.i64 0 ; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1 @@ -28,8 +28,8 @@ ebb0(v0: i64, v1: i64): } function %sdiv(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = sdiv v0, v1 ; check: $(hi=$V) = sshr_imm ; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 @@ -40,17 +40,17 @@ ebb0(v0: i64, v1: i64): ; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1. ; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern. 
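; (Same special case as in legalize-div-traps.clif above, but with avoid_div_traps=0 the divide-by-zero case is left to the hardware trap, so no explicit int_divz check is emitted here and only the -1 divisor still needs a branch.)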
function %srem(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): - ; check: ebb0( +block0(v0: i64, v1: i64): + ; check: block0( v2 = srem v0, v1 ; nextln: $(fm1=$V) = ifcmp_imm v1, -1 - ; nextln: brif eq $fm1, $(m1=$EBB) + ; nextln: brif eq $fm1, $(m1=$BB) ; check: $(hi=$V) = sshr_imm ; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1 - ; nextln: jump $(done=$EBB)($r) + ; nextln: jump $(done=$BB)($r) ; check: $m1: ; nextln: $(zero=$V) = iconst.i64 0 - ; nextln: jump $(done=$EBB)($zero) + ; nextln: jump $(done=$BB)($zero) ; check: $done(v2: i64): return v2 ; nextln: return v2 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-f64const-x64.clif b/cranelift/filetests/filetests/isa/x86/legalize-f64const-x64.clif index addafe90a3..382c6ba80a 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-f64const-x64.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-f64const-x64.clif @@ -5,7 +5,7 @@ target x86_64 ; regex: V=v\d+ function %f64const() -> f64 { -ebb0: +block0: v1 = f64const 0x1.0p1 ; check: $(tmp=$V) = iconst.i64 ; check: v1 = bitcast.f64 $tmp diff --git a/cranelift/filetests/filetests/isa/x86/legalize-fcvt_from_usint-i16.clif b/cranelift/filetests/filetests/isa/x86/legalize-fcvt_from_usint-i16.clif index b7d4b80977..c11e77f2c7 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-fcvt_from_usint-i16.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-fcvt_from_usint-i16.clif @@ -2,13 +2,13 @@ test compile target x86_64 function u0:0(i16) -> f64 fast { -ebb0(v0: i16): +block0(v0: i16): v1 = fcvt_from_uint.f64 v0 return v1 } function u0:1(i16) -> f64 fast { -ebb0(v0: i16): +block0(v0: i16): v1 = fcvt_from_sint.f64 v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif b/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif index c2f1ccb2d9..5fb080f8a6 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif @@ -2,7 +2,7 @@ test legalizer target x86_64 ; Test legalization for various forms of heap addresses. -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %heap_addrs(i32, i64, i64 vmctx) { gv4 = vmctx @@ -29,7 +29,7 @@ function %heap_addrs(i32, i64, i64 vmctx) { ; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, offset_guard 0x8000_0000, index_type i64 ; check: heap7 = dynamic gv1, min 0, bound gv2, offset_guard 4096, index_type i64 -ebb0(v0: i32, v1: i64, v3: i64): +block0(v0: i32, v1: i64, v3: i64): ; The fast path: 32-bit index, static heap with a sufficient bound, no bounds check needed!
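; (Sketch of why no check is needed, assuming heap0's static bound covers the whole 32-bit index space: a zero-extended i32 index cannot exceed such a bound, so the legalizer drops the compare-and-branch entirely and only the uextend in the checks below remains.)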
v4 = heap_addr.i64 heap0, v0, 0 ; check: v12 = uextend.i64 v0 @@ -38,8 +38,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v5 = heap_addr.i64 heap1, v0, 0 ; check: v14 = icmp_imm ugt v0, 0x0001_0000 - ; check: brz v14, $(resume_1=$EBB) - ; nextln: jump $(trap_1=$EBB) + ; check: brz v14, $(resume_1=$BB) + ; nextln: jump $(trap_1=$BB) ; check: $trap_1: ; nextln: trap heap_oob ; check: $resume_1: @@ -50,8 +50,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v6 = heap_addr.i64 heap2, v1, 0 ; check: v19 = iconst.i64 0x0001_0000_0000 ; check: v17 = icmp.i64 ugt v1, v19 - ; check: brz v17, $(resume_2=$EBB) - ; nextln: jump $(trap_2=$EBB) + ; check: brz v17, $(resume_2=$BB) + ; nextln: jump $(trap_2=$BB) ; check: $trap_2: ; nextln: trap heap_oob ; check: $resume_2: @@ -60,8 +60,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v7 = heap_addr.i64 heap3, v1, 0 ; check: v20 = icmp_imm.i64 ugt v1, 0x0001_0000 - ; check: brz v20, $(resume_3=$EBB) - ; nextln: jump $(trap_3=$EBB) + ; check: brz v20, $(resume_3=$BB) + ; nextln: jump $(trap_3=$BB) ; check: $trap_3: ; nextln: trap heap_oob ; check: $resume_3: @@ -72,8 +72,8 @@ ebb0(v0: i32, v1: i64, v3: i64): ; check: v22 = load.i32 notrap aligned v3+88 ; check: v23 = iadd_imm v22, 0 ; check: v24 = icmp.i32 ugt v0, v23 - ; check: brz v24, $(resume_4=$EBB) - ; nextln: jump $(trap_4=$EBB) + ; check: brz v24, $(resume_4=$BB) + ; nextln: jump $(trap_4=$BB) ; check: $trap_4: ; nextln: trap heap_oob ; check: $resume_4: @@ -85,8 +85,8 @@ ebb0(v0: i32, v1: i64, v3: i64): ; check: v27 = load.i32 notrap aligned v3+88 ; check: v28 = iadd_imm v27, 0 ; check: v29 = icmp.i32 ugt v0, v28 - ; check: brz v29, $(resume_5=$EBB) - ; nextln: jump $(trap_5=$EBB) + ; check: brz v29, $(resume_5=$BB) + ; nextln: jump $(trap_5=$BB) ; check: $trap_5: ; nextln: trap heap_oob ; check: $resume_5: @@ -98,8 +98,8 @@ ebb0(v0: i32, v1: i64, v3: i64): ; check: v32 = iadd_imm.i64 v3, 80 ; check: v33 = iadd_imm v32, 0 ; check: v34 = icmp.i64 ugt v1, v33 - ; check: brz v34, $(resume_6=$EBB) - ; nextln: jump $(trap_6=$EBB) + ; check: brz v34, $(resume_6=$BB) + ; nextln: jump $(trap_6=$BB) ; check: $trap_6: ; nextln: trap heap_oob ; check: $resume_6: @@ -110,8 +110,8 @@ ebb0(v0: i32, v1: i64, v3: i64): ; check: v36 = iadd_imm.i64 v3, 80 ; check: v37 = iadd_imm v36, 0 ; check: v38 = icmp.i64 ugt v1, v37 - ; check: brz v38, $(resume_7=$EBB) - ; nextln: jump $(trap_7=$EBB) + ; check: brz v38, $(resume_7=$BB) + ; nextln: jump $(trap_7=$BB) ; check: $trap_7: ; nextln: trap heap_oob ; check: $resume_7: diff --git a/cranelift/filetests/filetests/isa/x86/legalize-i128.clif b/cranelift/filetests/filetests/isa/x86/legalize-i128.clif index db071ba3c7..81a2d1ecdd 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-i128.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-i128.clif @@ -5,7 +5,7 @@ target x86_64 haswell ; regex: V=v\d+ function %imul(i128, i128) -> i128 { -ebb0(v1: i128, v2: i128): +block0(v1: i128, v2: i128): v10 = imul v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) diff --git a/cranelift/filetests/filetests/isa/x86/legalize-i64.clif b/cranelift/filetests/filetests/isa/x86/legalize-i64.clif index a484818a34..0d1eea5152 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-i64.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-i64.clif @@ -5,7 +5,7 @@ target i686 haswell ; regex: V=v\d+ function %iadd(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = iadd v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; 
nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -16,7 +16,7 @@ ebb0(v1: i64, v2: i64): } function %isub(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = isub v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -27,7 +27,7 @@ ebb0(v1: i64, v2: i64): } function %imul(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = imul v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -42,7 +42,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_eq(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp eq v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -53,7 +53,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_eq(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm eq v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -67,7 +67,7 @@ ebb0(v1: i64): } function %icmp_ne(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp ne v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -78,7 +78,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_ne(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm ne v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -92,7 +92,7 @@ ebb0(v1: i64): } function %icmp_sgt(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp sgt v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -106,7 +106,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_sgt(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm sgt v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -123,7 +123,7 @@ ebb0(v1: i64): } function %icmp_sge(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp sge v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -137,7 +137,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_sge(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm sge v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -154,7 +154,7 @@ ebb0(v1: i64): } function %icmp_slt(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp slt v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -168,7 +168,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_slt(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm slt v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -185,7 +185,7 @@ ebb0(v1: i64): } function %icmp_sle(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp sle v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -199,7 +199,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_sle(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm sle v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -216,7 +216,7 @@ ebb0(v1: i64): } function %icmp_ugt(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp ugt v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: 
v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -230,7 +230,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_ugt(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm ugt v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -247,7 +247,7 @@ ebb0(v1: i64): } function %icmp_uge(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp uge v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -261,7 +261,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_uge(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm uge v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -278,7 +278,7 @@ ebb0(v1: i64): } function %icmp_ult(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp ult v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -292,7 +292,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_ult(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm ult v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) @@ -309,7 +309,7 @@ ebb0(v1: i64): } function %icmp_ule(i64, i64) -> b1 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = icmp ule v1, v2 ; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V) ; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V) @@ -323,7 +323,7 @@ ebb0(v1: i64, v2: i64): } function %icmp_imm_ule(i64) -> b1 { -ebb0(v1: i64): +block0(v1: i64): v10 = icmp_imm ule v1, 0 ; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V) ; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V) diff --git a/cranelift/filetests/filetests/isa/x86/legalize-icmp-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-icmp-i8.clif index 41bd27950f..2519d3b484 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-icmp-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-icmp-i8.clif @@ -4,7 +4,7 @@ target x86_64 ; regex: V=v\d+ function u0:0(i8, i8) -> i8 fast { -ebb0(v0: i8, v1: i8): +block0(v0: i8, v1: i8): v2 = icmp_imm sle v0, 0 ; check: $(e1=$V) = sextend.i32 v0 ; nextln: v2 = icmp_imm sle $e1, 0 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-iconst-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-iconst-i8.clif index 8245ee73ce..39908d1f1d 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-iconst-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-iconst-i8.clif @@ -5,10 +5,10 @@ target x86_64 function u0:0(i64) system_v { ss0 = explicit_slot 0 -ebb0(v0: i64): - jump ebb1 +block0(v0: i64): + jump block1 -ebb1: +block1: ; _0 = const 42u8 v1 = iconst.i8 42 store v1, v0 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-imul-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-imul-i8.clif index d56ff787eb..6902636008 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-imul-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-imul-i8.clif @@ -4,7 +4,7 @@ target x86_64 function u0:0(i64, i8, i8) system_v { -ebb0(v0: i64, v1: i8, v2: i8): +block0(v0: i64, v1: i8, v2: i8): v11 = imul v1, v2 store v11, v0 return diff --git a/cranelift/filetests/filetests/isa/x86/legalize-imul-imm-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-imul-imm-i8.clif index 6655c562e7..82d3fa26ce 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-imul-imm-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-imul-imm-i8.clif @@ -5,7 +5,7 @@ target x86_64 function 
u0:0(i64, i8) system_v { ss0 = explicit_slot 1 -ebb0(v0: i64, v1: i8): +block0(v0: i64, v1: i8): v3 = stack_addr.i64 ss0 v5 = load.i8 v3 v6 = iconst.i8 2 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-isplit-backwards.clif b/cranelift/filetests/filetests/isa/x86/legalize-isplit-backwards.clif index 43881fe09e..5a903350b5 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-isplit-backwards.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-isplit-backwards.clif @@ -2,11 +2,11 @@ test compile target x86_64 function u0:0(i128) -> i64, i64 fast { -; check: ebb0(v4: i64 [%rdi], v5: i64 [%rsi], v8: i64 [%rbp]): -ebb0(v0: i128): - jump ebb2 +; check: block0(v4: i64 [%rdi], v5: i64 [%rsi], v8: i64 [%rbp]): +block0(v0: i128): + jump block2 -ebb1: +block1: ; When this `isplit` is legalized, the bnot below is not yet legalized, ; so there isn't a corresponding `iconcat` yet. We should try legalization ; for this `isplit` again once all instructions have been legalized. ; return v6, v7 return v2, v3 -ebb2: +block2: ; check: v6 = bnot.i64 v4 ; check: v2 -> v6 ; check: v7 = bnot.i64 v5 ; check: v3 -> v7 v1 = bnot.i128 v0 - jump ebb1 + jump block1 } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-libcall.clif b/cranelift/filetests/filetests/isa/x86/legalize-libcall.clif index e28bebd668..8ddb0865f8 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-libcall.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-libcall.clif @@ -5,7 +5,7 @@ set is_pic target x86_64 function %floor(f32) -> f32 { -ebb0(v0: f32): +block0(v0: f32): v1 = floor v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-load-store-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-load-store-i8.clif index cecf0e145f..2fcb086e72 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-load-store-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-load-store-i8.clif @@ -9,16 +9,16 @@ function u0:0(i64, i8, i8) system_v { ss3 = explicit_slot 1 ss4 = explicit_slot 1 -ebb0(v0: i64, v1: i8, v2: i8): +block0(v0: i64, v1: i8, v2: i8): v3 = stack_addr.i64 ss1 store v1, v3 v4 = stack_addr.i64 ss2 store v2, v4 v5 = stack_addr.i64 ss3 v6 = stack_addr.i64 ss4 - jump ebb1 + jump block1 -ebb1: +block1: v7 = load.i8 v3 store v7, v5 v8 = load.i8 v4 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-memory.clif b/cranelift/filetests/filetests/isa/x86/legalize-memory.clif index 348d763716..78d1796d00 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-memory.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-memory.clif @@ -3,13 +3,13 @@ test legalizer target x86_64 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %vmctx(i64 vmctx) -> i64 { gv0 = vmctx gv1 = iadd_imm.i64 gv0, -16 -ebb1(v1: i64): +block1(v1: i64): v2 = global_value.i64 gv1 ; check: v2 = iadd_imm v1, -16 return v2 @@ -21,7 +21,7 @@ function %load(i64 vmctx) -> i64 { gv1 = load.i64 notrap aligned gv0-16 gv2 = iadd_imm.i64 gv1, 32 -ebb1(v1: i64): +block1(v1: i64): v2 = global_value.i64 gv2 ; check: $(p1=$V) = load.i64 notrap aligned v1-16 ; check: v2 = iadd_imm $p1, 32 @@ -33,7 +33,7 @@ function %symbol() -> i64 { gv0 = symbol %something gv1 = symbol u123:456 -ebb1: +block1: v0 = global_value.i64 gv0 ; check: v0 = symbol_value.i64 gv0 v1 = global_value.i64 gv1 @@ -49,8 +49,8 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v { gv1 = iadd_imm.i64 gv0, 64 heap0 = static gv1, min 0x1000, bound 0x1_0000_0000,
offset_guard 0x8000_0000 -ebb0(v0: i32, v999: i64): - ; check: ebb0( +block0(v0: i32, v999: i64): + ; check: block0( v1 = heap_addr.i64 heap0, v0, 1 ; Boundscheck should be eliminated. ; Checks here are assuming that no peephole opts fold the load offsets. @@ -70,13 +70,13 @@ function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v { gv1 = iadd_imm.i64 gv0, 64 heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v999: i64): +block0(v0: i32, v999: i64): ; Everything after the obviously OOB access should be eliminated, leaving - ; the `trap heap_oob` instruction as the terminator of the Ebb and moving - ; the remainder of the instructions into an inaccessible Ebb. - ; check: ebb0( + ; the `trap heap_oob` instruction as the terminator of the block and moving + ; the remainder of the instructions into an inaccessible block. + ; check: block0( ; nextln: trap heap_oob - ; check: ebb1: + ; check: block1: ; nextln: v1 = iconst.i64 0 ; nextln: v2 = load.f32 v1+16 ; nextln: return v2 @@ -94,13 +94,13 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v { gv1 = iadd_imm.i64 gv0, 64 heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v999: i64): - ; check: ebb0( +block0(v0: i32, v999: i64): + ; check: block0( v1 = heap_addr.i64 heap0, v0, 0x8000_0000 ; Boundscheck code ; check: $(oob=$V) = icmp - ; nextln: brz $oob, $(ok=$EBB) - ; nextln: jump $(trap_oob=$EBB) + ; nextln: brz $oob, $(ok=$BB) + ; nextln: jump $(trap_oob=$BB) ; check: $trap_oob: ; nextln: trap heap_oob ; check: $ok: diff --git a/cranelift/filetests/filetests/isa/x86/legalize-mulhi.clif b/cranelift/filetests/filetests/isa/x86/legalize-mulhi.clif index 946bcd8428..375a454c20 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-mulhi.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-mulhi.clif @@ -4,7 +4,7 @@ target x86_64 baseline ; umulhi/smulhi on 64 bit operands function %i64_umulhi(i64, i64) -> i64 { -ebb0(v10: i64, v11: i64): +block0(v10: i64, v11: i64): v12 = umulhi v10, v11 ; check: %rdi -> %rax ; check: x86_umulx @@ -13,7 +13,7 @@ ebb0(v10: i64, v11: i64): } function %i64_smulhi(i64, i64) -> i64 { -ebb0(v20: i64, v21: i64): +block0(v20: i64, v21: i64): v22 = smulhi v20, v21 ; check: %rdi -> %rax ; check: x86_smulx @@ -25,7 +25,7 @@ ebb0(v20: i64, v21: i64): ; umulhi/smulhi on 32 bit operands function %i32_umulhi(i32, i32) -> i32 { -ebb0(v30: i32, v31: i32): +block0(v30: i32, v31: i32): v32 = umulhi v30, v31 ; check: %rdi -> %rax ; check: x86_umulx @@ -34,7 +34,7 @@ ebb0(v30: i32, v31: i32): } function %i32_smulhi(i32, i32) -> i32 { -ebb0(v40: i32, v41: i32): +block0(v40: i32, v41: i32): v42 = smulhi v40, v41 ; check: %rdi -> %rax ; check: x86_smulx diff --git a/cranelift/filetests/filetests/isa/x86/legalize-popcnt-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-popcnt-i8.clif index e761a2c7ca..c3f89c4807 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-popcnt-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-popcnt-i8.clif @@ -2,7 +2,7 @@ test compile target x86_64 function u0:0(i8) -> i8 fast { -ebb0(v0: i8): +block0(v0: i8): v1 = popcnt v0 ; check-not: sextend.i32 v0 return v1 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-regmove-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-regmove-i8.clif index 8dc746d701..6f080ca89b 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-regmove-i8.clif +++
b/cranelift/filetests/filetests/isa/x86/legalize-regmove-i8.clif @@ -11,16 +11,16 @@ function u0:0(i64, i64, i64) system_v { sig0 = (i64, i16, i64) system_v fn0 = colocated u0:11 sig0 -ebb0(v0: i64, v1: i64, v2: i64): +block0(v0: i64, v1: i64, v2: i64): v3 = stack_addr.i64 ss1 store v1, v3 v4 = stack_addr.i64 ss2 store v2, v4 v5 = stack_addr.i64 ss3 v6 = stack_addr.i64 ss4 - jump ebb1 + jump block1 -ebb1: +block1: v7 = load.i64 v3 v8 = load.i16 v7 store v8, v5 @@ -29,8 +29,8 @@ ebb1: v10 = load.i16 v5 v11 = load.i64 v6 call fn0(v0, v10, v11) - jump ebb2 + jump block2 -ebb2: +block2: return } diff --git a/cranelift/filetests/filetests/isa/x86/legalize-rotate.clif b/cranelift/filetests/filetests/isa/x86/legalize-rotate.clif index 155b6001b4..78524d2969 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-rotate.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-rotate.clif @@ -5,7 +5,7 @@ target x86_64 ; regex: R=%[a-z0-9]+ function %i32_rotr(i32, i32) -> i32 fast { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): ; check: regmove v1, $R -> %rcx ; check: v2 = rotr v0, v1 v2 = rotr v0, v1 @@ -13,14 +13,14 @@ ebb0(v0: i32, v1: i32): } function %i32_rotr_imm_1(i32) -> i32 fast { -ebb0(v0: i32): +block0(v0: i32): ; check: $V = rotr_imm v0, 1 v2 = rotr_imm v0, 1 return v2 } function %i32_rotl(i32, i32) -> i32 fast { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): ; check: regmove v1, $R -> %rcx ; check: v2 = rotl v0, v1 v2 = rotl v0, v1 @@ -28,7 +28,7 @@ ebb0(v0: i32, v1: i32): } function %i32_rotl_imm_1(i32) -> i32 fast { -ebb0(v0: i32): +block0(v0: i32): ; check: $V = rotl_imm v0, 1 v2 = rotl_imm v0, 1 return v2 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-shlr-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-shlr-i8.clif index dbd0da1204..ee6e3e6d11 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-shlr-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-shlr-i8.clif @@ -4,7 +4,7 @@ target x86_64 ; regex: V=v\d+ function u0:0(i8, i8) -> i8 fast { -ebb0(v0: i8, v1: i8): +block0(v0: i8, v1: i8): v2 = ishl v0, v1 ; check: $(e1=$V) = uextend.i32 v0 ; check: $(r1=$V) = ishl $e1, v1 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-splat.clif b/cranelift/filetests/filetests/isa/x86/legalize-splat.clif index 38731c778f..2fa6ace7e9 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-splat.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-splat.clif @@ -5,7 +5,7 @@ target x86_64 haswell ; use baldrdash_system_v calling convention here for simplicity (avoids prologue, epilogue) function %test_splat_i32() -> i32x4 baldrdash_system_v { -ebb0: +block0: v0 = iconst.i32 42 v1 = splat.i32x4 v0 return v1 @@ -14,7 +14,7 @@ ebb0: ; sameln: function %test_splat_i32() -> i32x4 [%xmm0] baldrdash_system_v { ; nextln: ss0 = incoming_arg 0, offset 0 ; nextln: -; nextln: ebb0: +; nextln: block0: ; nextln: v0 = iconst.i32 42 ; nextln: v2 = scalar_to_vector.i32x4 v0 ; nextln: v1 = x86_pshufd v2, 0 @@ -24,13 +24,13 @@ ebb0: function %test_splat_i64() -> i64x2 baldrdash_system_v { -ebb0: +block0: v0 = iconst.i64 42 v1 = splat.i64x2 v0 return v1 } -; check: ebb0: +; check: block0: ; nextln: v0 = iconst.i64 42 ; nextln: v2 = scalar_to_vector.i64x2 v0 ; nextln: v1 = x86_pinsr v2, 1, v0 @@ -39,13 +39,13 @@ ebb0: function %test_splat_b16() -> b16x8 baldrdash_system_v { -ebb0: +block0: v0 = bconst.b16 true v1 = splat.b16x8 v0 return v1 } -; check: ebb0: +; check: block0: ; nextln: v0 = bconst.b16 true ; nextln: v2 = scalar_to_vector.b16x8 v0 
; nextln: v3 = x86_pinsr v2, 1, v0 @@ -57,13 +57,13 @@ ebb0: function %test_splat_i8() -> i8x16 baldrdash_system_v { -ebb0: +block0: v0 = iconst.i8 42 v1 = splat.i8x16 v0 return v1 } -; check: ebb0: +; check: block0: ; nextln: v2 = iconst.i32 42 ; nextln: v0 = ireduce.i8 v2 ; nextln: v3 = scalar_to_vector.i8x16 v0 diff --git a/cranelift/filetests/filetests/isa/x86/legalize-tables.clif b/cranelift/filetests/filetests/isa/x86/legalize-tables.clif index 762f8a1038..5f4632041d 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-tables.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-tables.clif @@ -2,7 +2,7 @@ test legalizer target x86_64 ; Test legalization for various forms of table addresses. -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function %table_addrs(i32, i64, i64 vmctx) { gv4 = vmctx @@ -20,12 +20,12 @@ function %table_addrs(i32, i64, i64 vmctx) { ; check: table2 = dynamic gv0, min 0x0001_0000, bound gv1, element_size 1, index_type i64 ; check: table3 = dynamic gv0, min 0, bound gv1, element_size 16, index_type i64 -ebb0(v0: i32, v1: i64, v3: i64): +block0(v0: i32, v1: i64, v3: i64): v4 = table_addr.i64 table0, v0, +0 ; check: v8 = load.i32 notrap aligned v3+88 ; check: v9 = icmp uge v0, v8 - ; check: brz v9, $(resume_1=$EBB) - ; nextln: jump $(trap_1=$EBB) + ; check: brz v9, $(resume_1=$BB) + ; nextln: jump $(trap_1=$BB) ; check: $trap_1: ; nextln: trap table_oob ; check: $resume_1: @@ -36,8 +36,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v5 = table_addr.i64 table1, v0, +0 ; check: v12 = load.i32 notrap aligned v3+88 ; check: v13 = icmp.i32 uge v0, v12 - ; check: brz v13, $(resume_2=$EBB) - ; nextln: jump $(trap_2=$EBB) + ; check: brz v13, $(resume_2=$BB) + ; nextln: jump $(trap_2=$BB) ; check: $trap_2: ; nextln: trap table_oob ; check: $resume_2: @@ -49,8 +49,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v6 = table_addr.i64 table2, v1, +0 ; check: v17 = iadd_imm.i64 v3, 80 ; check: v18 = icmp.i64 uge v1, v17 - ; check: brz v18, $(resume_3=$EBB) - ; nextln: jump $(trap_3=$EBB) + ; check: brz v18, $(resume_3=$BB) + ; nextln: jump $(trap_3=$BB) ; check: $trap_3: ; nextln: trap table_oob ; check: $resume_3: @@ -60,8 +60,8 @@ ebb0(v0: i32, v1: i64, v3: i64): v7 = table_addr.i64 table3, v1, +0 ; check: v20 = iadd_imm.i64 v3, 80 ; check: v21 = icmp.i64 uge v1, v20 - ; check: brz v21, $(resume_4=$EBB) - ; nextln: jump $(trap_4=$EBB) + ; check: brz v21, $(resume_4=$BB) + ; nextln: jump $(trap_4=$BB) ; check: $trap_4: ; nextln: trap table_oob ; check: $resume_4: diff --git a/cranelift/filetests/filetests/isa/x86/legalize-urem-i8.clif b/cranelift/filetests/filetests/isa/x86/legalize-urem-i8.clif index 0c66a3f580..de193c2abb 100644 --- a/cranelift/filetests/filetests/isa/x86/legalize-urem-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/legalize-urem-i8.clif @@ -4,7 +4,7 @@ target x86_64 ; regex: V=v\d+ function u0:0(i8, i8) -> i8 fast { -ebb0(v0: i8, v1: i8): +block0(v0: i8, v1: i8): v2 = urem v0, v1 ; check: $(a=$V) = uextend.i32 v0 ; nextln: $(b=$V) = uextend.i32 v1 diff --git a/cranelift/filetests/filetests/isa/x86/load-store-narrow.clif b/cranelift/filetests/filetests/isa/x86/load-store-narrow.clif index 5f95b92fc0..070b7459e2 100644 --- a/cranelift/filetests/filetests/isa/x86/load-store-narrow.clif +++ b/cranelift/filetests/filetests/isa/x86/load-store-narrow.clif @@ -2,14 +2,14 @@ test compile target i686 function u0:0(i64, i32) system_v { -ebb0(v0: i64, v1: i32): +block0(v0: i64, v1: i32): v2 = bor v0, v0 store v2, v1 return } function u0:1(i32) -> i64 system_v { -ebb0(v1: i32): 
+block0(v1: i32): v0 = load.i64 v1 v2 = bor v0, v0 return v2 diff --git a/cranelift/filetests/filetests/isa/x86/nop.clif b/cranelift/filetests/filetests/isa/x86/nop.clif index 2863185e41..08d4fdd7a0 100644 --- a/cranelift/filetests/filetests/isa/x86/nop.clif +++ b/cranelift/filetests/filetests/isa/x86/nop.clif @@ -3,7 +3,7 @@ test compile target x86_64 function %test(i32) -> i32 system_v { -ebb0(v0: i32): +block0(v0: i32): nop v1 = iconst.i32 42 return v1 diff --git a/cranelift/filetests/filetests/isa/x86/optimized-zero-constants-32bit.clif b/cranelift/filetests/filetests/isa/x86/optimized-zero-constants-32bit.clif index 7dbbcc86e0..0f0f06e6f2 100644 --- a/cranelift/filetests/filetests/isa/x86/optimized-zero-constants-32bit.clif +++ b/cranelift/filetests/filetests/isa/x86/optimized-zero-constants-32bit.clif @@ -4,21 +4,21 @@ set opt_level=speed_and_size target i686 function %foo() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm0, %xmm0 [-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0 return v0 } function %bar() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm0, %xmm0 [-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0 return v1 } function %zero_dword() -> i32 fast { -ebb0: +block0: ; asm: xor %eax, %eax [-,%rax] v0 = iconst.i32 0 ; bin: 31 c0 ; asm: xor %edi, %edi @@ -27,7 +27,7 @@ ebb0: } function %zero_word() -> i16 fast { -ebb0: +block0: ; while you may expect this to be encoded like 6631c0, aka ; xor %ax, %ax, the upper 16 bits of the register used for ; i16 are left undefined, so it's not wrong to clear them. @@ -43,7 +43,7 @@ ebb0: } function %zero_byte() -> i8 fast { -ebb0: +block0: ; asm: xor %al, %al [-,%rax] v0 = iconst.i8 0 ; bin: 30 c0 ; asm: xor %dh, %dh diff --git a/cranelift/filetests/filetests/isa/x86/optimized-zero-constants.clif b/cranelift/filetests/filetests/isa/x86/optimized-zero-constants.clif index 807466e84c..7f5890a1ae 100644 --- a/cranelift/filetests/filetests/isa/x86/optimized-zero-constants.clif +++ b/cranelift/filetests/filetests/isa/x86/optimized-zero-constants.clif @@ -4,35 +4,35 @@ set opt_level=speed_and_size target x86_64 function %zero_const_32bit_no_rex() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm0, %xmm0 [-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0 return v0 } function %zero_const_32bit_rex() -> f32 fast { -ebb0: +block0: ; asm: xorps %xmm8, %xmm8 [-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0 return v1 } function %zero_const_64bit_no_rex() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm0, %xmm0 [-,%xmm0] v0 = f64const 0.0 ; bin: 66 0f 57 c0 return v0 } function %zero_const_64bit_rex() -> f64 fast { -ebb0: +block0: ; asm: xorpd %xmm8, %xmm8 [-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0 return v1 } function %imm_zero_register() -> i64 fast { -ebb0: +block0: ; asm: xor %eax, %eax [-,%rax] v0 = iconst.i64 0 ; bin: 31 c0 ; asm: xor %edi, %edi @@ -45,7 +45,7 @@ ebb0: } function %zero_word() -> i16 fast { -ebb0: +block0: ; while you may expect this to be encoded like 6631c0, aka ; xor %ax, %ax, the upper 16 bits of the register used for ; i16 are left undefined, so it's not wrong to clear them. 
@@ -61,7 +61,7 @@ ebb0: } function %zero_byte() -> i8 fast { -ebb0: +block0: ; asm: xor %r8b, %r8b [-,%r15] v0 = iconst.i8 0 ; bin: 45 30 ff ; asm: xor %al, %al diff --git a/cranelift/filetests/filetests/isa/x86/pinned-reg.clif b/cranelift/filetests/filetests/isa/x86/pinned-reg.clif index 2a447a6d9d..f4bbc2501b 100644 --- a/cranelift/filetests/filetests/isa/x86/pinned-reg.clif +++ b/cranelift/filetests/filetests/isa/x86/pinned-reg.clif @@ -11,7 +11,7 @@ target x86_64 ; r15 is the pinned heap register. It must not be rewritten, so it must not be ; used as a tied output register. function %tied_input() -> i64 system_v { -ebb0: +block0: v1 = get_pinned_reg.i64 v2 = iadd_imm v1, 42 return v2 @@ -25,7 +25,7 @@ ebb0: ;; It mustn't be used even if this is a tied input used twice. function %tied_twice() -> i64 system_v { -ebb0: +block0: v1 = get_pinned_reg.i64 v2 = iadd v1, v1 return v2 @@ -38,7 +38,7 @@ ebb0: ; sameln: iadd v1, v1 function %uses() -> i64 system_v { -ebb0: +block0: v1 = get_pinned_reg.i64 v2 = iadd_imm v1, 42 v3 = get_pinned_reg.i64 @@ -62,7 +62,7 @@ function u0:1(i64 vmctx) -> i64 system_v { gv0 = vmctx heap0 = static gv0, min 0x000a_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32 -ebb0(v42: i64): +block0(v42: i64): v5 = iconst.i32 42 v6 = heap_addr.i64 heap0, v5, 0 v7 = load.i64 v6 diff --git a/cranelift/filetests/filetests/isa/x86/probestack-adjusts-sp.clif b/cranelift/filetests/filetests/isa/x86/probestack-adjusts-sp.clif index 934e308bc3..17be399a4e 100644 --- a/cranelift/filetests/filetests/isa/x86/probestack-adjusts-sp.clif +++ b/cranelift/filetests/filetests/isa/x86/probestack-adjusts-sp.clif @@ -8,7 +8,7 @@ target x86_64 function %big() system_v { ss0 = explicit_slot 300000 -ebb0: +block0: return } ; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v { @@ -17,7 +17,7 @@ ebb0: ; nextln: sig0 = (i64 [%rax]) probestack ; nextln: fn0 = colocated %Probestack sig0 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0 diff --git a/cranelift/filetests/filetests/isa/x86/probestack-disabled.clif b/cranelift/filetests/filetests/isa/x86/probestack-disabled.clif index f548e1a11f..433c634cab 100644 --- a/cranelift/filetests/filetests/isa/x86/probestack-disabled.clif +++ b/cranelift/filetests/filetests/isa/x86/probestack-disabled.clif @@ -7,14 +7,14 @@ target x86_64 function %big() system_v { ss0 = explicit_slot 300000 -ebb0: +block0: return } ; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v { ; nextln: ss0 = explicit_slot 300000, offset -300016 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 0x0004_93e0 diff --git a/cranelift/filetests/filetests/isa/x86/probestack-noncolocated.clif b/cranelift/filetests/filetests/isa/x86/probestack-noncolocated.clif index c304945231..9af61f0586 100644 --- a/cranelift/filetests/filetests/isa/x86/probestack-noncolocated.clif +++ b/cranelift/filetests/filetests/isa/x86/probestack-noncolocated.clif @@ -5,7 +5,7 @@ target x86_64 function %big() system_v { ss0 = explicit_slot 300000 -ebb0: +block0: return } ; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v { @@ -14,7 +14,7 @@ ebb0: ; nextln: sig0 = (i64
[%rax]) -> i64 [%rax] probestack ; nextln: fn0 = %Probestack sig0 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0 diff --git a/cranelift/filetests/filetests/isa/x86/probestack-size.clif b/cranelift/filetests/filetests/isa/x86/probestack-size.clif index 6b01b5cd92..8eb934b06c 100644 --- a/cranelift/filetests/filetests/isa/x86/probestack-size.clif +++ b/cranelift/filetests/filetests/isa/x86/probestack-size.clif @@ -8,7 +8,7 @@ target x86_64 function %big() system_v { ss0 = explicit_slot 4097 -ebb0: +block0: return } @@ -16,7 +16,7 @@ ebb0: ; nextln: ss0 = explicit_slot 4097, offset -4113 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 4112 @@ -30,7 +30,7 @@ ebb0: function %bigger() system_v { ss0 = explicit_slot 8192 -ebb0: +block0: return } @@ -38,7 +38,7 @@ ebb0: ; nextln: ss0 = explicit_slot 8192, offset -8208 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 8192 @@ -52,7 +52,7 @@ ebb0: function %biggest() system_v { ss0 = explicit_slot 8193 -ebb0: +block0: return } @@ -62,7 +62,7 @@ ebb0: ; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack ; nextln: fn0 = colocated %Probestack sig0 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 8208 diff --git a/cranelift/filetests/filetests/isa/x86/probestack.clif b/cranelift/filetests/filetests/isa/x86/probestack.clif index 0fae5f33e2..d9f29a8681 100644 --- a/cranelift/filetests/filetests/isa/x86/probestack.clif +++ b/cranelift/filetests/filetests/isa/x86/probestack.clif @@ -6,7 +6,7 @@ target x86_64 function %big() system_v { ss0 = explicit_slot 4097 -ebb0: +block0: return } ; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v { @@ -15,7 +15,7 @@ ebb0: ; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack ; nextln: fn0 = colocated %Probestack sig0 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 4112 @@ -31,7 +31,7 @@ ebb0: function %small() system_v { ss0 = explicit_slot 4096 -ebb0: +block0: return } @@ -39,7 +39,7 @@ ebb0: ; nextln: ss0 = explicit_slot 4096, offset -4112 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: [RexOp1pushq#50] x86_push v0 ; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp ; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 4096 diff --git a/cranelift/filetests/filetests/isa/x86/prologue-epilogue.clif b/cranelift/filetests/filetests/isa/x86/prologue-epilogue.clif index f8a0c0146c..25118ca72b 100644 --- a/cranelift/filetests/filetests/isa/x86/prologue-epilogue.clif +++ b/cranelift/filetests/filetests/isa/x86/prologue-epilogue.clif @@ -6,14 +6,14 @@ target x86_64 
haswell ; An empty function. function %empty() { -ebb0: +block0: return } ; check: function %empty(i64 fp [%rbp]) -> i64 fp [%rbp] fast { ; nextln: ss0 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: x86_push v0 ; nextln: copy_special %rsp -> %rbp ; nextln: v1 = x86_pop.i64 @@ -24,7 +24,7 @@ ebb0: function %one_stack_slot() { ss0 = explicit_slot 168 -ebb0: +block0: return } @@ -32,7 +32,7 @@ ebb0: ; nextln: ss0 = explicit_slot 168, offset -184 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: x86_push v0 ; nextln: copy_special %rsp -> %rbp ; nextln: adjust_sp_down_imm 176 @@ -46,7 +46,7 @@ ebb0: function %call() { fn0 = %foo() -ebb0: +block0: call fn0() return } @@ -56,7 +56,7 @@ ebb0: ; nextln: sig0 = () fast ; nextln: fn0 = %foo sig0 ; nextln: -; nextln: ebb0(v0: i64 [%rbp]): +; nextln: block0(v0: i64 [%rbp]): ; nextln: x86_push v0 ; nextln: copy_special %rsp -> %rbp ; nextln: call fn0() @@ -67,7 +67,7 @@ ebb0: ; A function that uses a lot of registers but doesn't quite need to spill. function %no_spill(i64, i64) { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = load.i32 v0+0 v3 = load.i32 v0+8 v4 = load.i32 v0+16 @@ -100,7 +100,7 @@ ebb0(v0: i64, v1: i64): ; check: function %no_spill(i64 [%rdi], i64 [%rsi], i64 fp [%rbp], i64 csr [%rbx], i64 csr [%r12], i64 csr [%r13], i64 csr [%r14], i64 csr [%r15]) -> i64 fp [%rbp], i64 csr [%rbx], i64 csr [%r12], i64 csr [%r13], i64 csr [%r14], i64 csr [%r15] fast { ; nextln: ss0 = incoming_arg 56, offset -56 ; nextln: -; nextln: ebb0(v0: i64 [%rdi], v1: i64 [%rsi], v15: i64 [%rbp], v16: i64 [%rbx], v17: i64 [%r12], v18: i64 [%r13], v19: i64 [%r14], v20: i64 [%r15]): +; nextln: block0(v0: i64 [%rdi], v1: i64 [%rsi], v15: i64 [%rbp], v16: i64 [%rbx], v17: i64 [%r12], v18: i64 [%r13], v19: i64 [%r14], v20: i64 [%r15]): ; nextln: x86_push v15 ; nextln: copy_special %rsp -> %rbp ; nextln: x86_push v16 @@ -146,7 +146,7 @@ ebb0(v0: i64, v1: i64): ; This function requires too many registers and must spill. function %yes_spill(i64, i64) { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = load.i32 v0+0 v3 = load.i32 v0+8 v4 = load.i32 v0+16 @@ -181,7 +181,7 @@ ebb0(v0: i64, v1: i64): ; check: function %yes_spill(i64 [%rdi], i64 [%rsi], i64 fp [%rbp], i64 csr [%rbx], i64 csr [%r12], i64 csr [%r13], i64 csr [%r14], i64 csr [%r15]) -> i64 fp [%rbp], i64 csr [%rbx], i64 csr [%r12], i64 csr [%r13], i64 csr [%r14], i64 csr [%r15] fast { ; check: ss0 = spill_slot -; check: ebb0(v16: i64 [%rdi], v17: i64 [%rsi], v48: i64 [%rbp], v49: i64 [%rbx], v50: i64 [%r12], v51: i64 [%r13], v52: i64 [%r14], v53: i64 [%r15]): +; check: block0(v16: i64 [%rdi], v17: i64 [%rsi], v48: i64 [%rbp], v49: i64 [%rbx], v50: i64 [%r12], v51: i64 [%r13], v52: i64 [%r14], v53: i64 [%r15]): ; nextln: x86_push v48 ; nextln: copy_special %rsp -> %rbp ; nextln: x86_push v49 @@ -208,21 +208,21 @@ ebb0(v0: i64, v1: i64): ; A function which uses diverted registers. 
function %divert(i32) -> i32 system_v { -ebb0(v0: i32): +block0(v0: i32): v2 = iconst.i32 0 v3 = iconst.i32 1 - jump ebb1(v0, v3, v2) + jump block1(v0, v3, v2) -ebb1(v4: i32, v5: i32, v6: i32): - brz v4, ebb3 - jump ebb2 +block1(v4: i32, v5: i32, v6: i32): + brz v4, block3 + jump block2 -ebb2: +block2: v7 = iadd v5, v6 v8 = iadd_imm v4, -1 - jump ebb1(v8, v7, v5) + jump block1(v8, v7, v5) -ebb3: +block3: return v5 } @@ -234,7 +234,7 @@ ebb3: function %stack_limit(i64 stack_limit) { ss0 = explicit_slot 168 -ebb0(v0: i64): +block0(v0: i64): return } @@ -242,7 +242,7 @@ ebb0(v0: i64): ; nextln: ss0 = explicit_slot 168, offset -184 ; nextln: ss1 = incoming_arg 16, offset -16 ; nextln: -; nextln: ebb0(v0: i64 [%rdi], v4: i64 [%rbp]): +; nextln: block0(v0: i64 [%rdi], v4: i64 [%rbp]): ; nextln: v1 = copy v0 ; nextln: v2 = iadd_imm v1, 16 ; nextln: v3 = ifcmp_sp v2 diff --git a/cranelift/filetests/filetests/isa/x86/pshufb.clif b/cranelift/filetests/filetests/isa/x86/pshufb.clif index 6fb31b198c..c9d5d798d9 100644 --- a/cranelift/filetests/filetests/isa/x86/pshufb.clif +++ b/cranelift/filetests/filetests/isa/x86/pshufb.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 has_ssse3=true function %test_pshufb() { -ebb0: +block0: [-, %rax] v0 = iconst.i8 42 [-, %xmm0] v1 = scalar_to_vector.i8x16 v0 ; bin: 66 40 0f 6e c0 [-, %rbx] v2 = iconst.i8 43 diff --git a/cranelift/filetests/filetests/isa/x86/pshufd.clif b/cranelift/filetests/filetests/isa/x86/pshufd.clif index 6f4896d0d9..69dc3f4ea0 100644 --- a/cranelift/filetests/filetests/isa/x86/pshufd.clif +++ b/cranelift/filetests/filetests/isa/x86/pshufd.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 function %test_pshuf() { -ebb0: +block0: [-, %rax] v0 = iconst.i32 42 [-, %xmm0] v1 = scalar_to_vector.i32x4 v0 ; bin: 66 40 0f 6e c0 [-, %xmm0] v2 = x86_pshufd v1, 0 ; bin: 66 0f 70 c0 00 diff --git a/cranelift/filetests/filetests/isa/x86/raw_bitcast.clif b/cranelift/filetests/filetests/isa/x86/raw_bitcast.clif index 5c1c2ea322..717f655ec6 100644 --- a/cranelift/filetests/filetests/isa/x86/raw_bitcast.clif +++ b/cranelift/filetests/filetests/isa/x86/raw_bitcast.clif @@ -2,7 +2,7 @@ test binemit target x86_64 function %test_raw_bitcast_i16x8_to_b32x4() { -ebb0: +block0: [-, %rbx] v0 = bconst.b16 true [-, %xmm2] v1 = scalar_to_vector.b16x8 v0 [-, %xmm2] v2 = raw_bitcast.i32x4 v1 ; bin: diff --git a/cranelift/filetests/filetests/isa/x86/relax_branch.clif b/cranelift/filetests/filetests/isa/x86/relax_branch.clif index 7fb8a85167..8b29a057e0 100644 --- a/cranelift/filetests/filetests/isa/x86/relax_branch.clif +++ b/cranelift/filetests/filetests/isa/x86/relax_branch.clif @@ -17,10 +17,10 @@ function u0:2691(i32 [%rdi], i32 [%rsi], i64 vmctx [%r14]) -> i64 uext [%rax] ba gv2 = load.i64 notrap aligned readonly gv0 heap0 = static gv2, min 0xd839_6000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32 - ebb0(v0: i32 [%rdi], v1: i32 [%rsi], v2: i64 [%r14]): -@0005 [-] fallthrough ebb3(v0, v1) + block0(v0: i32 [%rdi], v1: i32 [%rsi], v2: i64 [%r14]): +@0005 [-] fallthrough block3(v0, v1) - ebb3(v8: i32 [%rdi], v19: i32 [%rsi]): + block3(v8: i32 [%rdi], v19: i32 [%rsi]): @0005 [RexOp1ldDisp8#808b,%rax] v7 = load.i64 v2+48 @0005 [DynRexOp1rcmp_ib#f083,%rflags] v91 = ifcmp_imm v7, 0 @0005 [trapif#00] trapif ne v91, interrupt @@ -54,20 +54,20 @@ function u0:2691(i32 [%rdi], i32 [%rsi], i64 vmctx [%r14]) -> i64 uext [%rax] ba @0048 [DynRexOp1r_ib#70c1,%rcx] v35 = sshr_imm v33, 24 @004c [DynRexOp1r_id#4081,%rcx] v37 = band_imm v35, 255 
[DynRexOp1rcmp_ib#7083,%rflags] v97 = ifcmp_imm v37, 26 -@0050 [Op1brib#70] brif sge v97, ebb6 -@0050 [-] fallthrough ebb10 +@0050 [Op1brib#70] brif sge v97, block6 +@0050 [-] fallthrough block10 - ebb10: + block10: [DynRexOp1umr#89,%rcx] v101 = copy v18 -@0054 [Op1jmpb#eb] jump ebb5(v18, v101) +@0054 [Op1jmpb#eb] jump block5(v18, v101) - ebb6: + block6: [DynRexOp1umr#89,%rcx] v102 = copy.i32 v16 @0059 [RexOp1rmov#89] regmove v102, %rcx -> %rdi @0059 [RexOp1rmov#89] regmove.i32 v16, %rbx -> %rcx -@0059 [-] fallthrough ebb5(v102, v16) +@0059 [-] fallthrough block5(v102, v16) - ebb5(v41: i32 [%rdi], v84: i32 [%rcx]): + block5(v41: i32 [%rdi], v84: i32 [%rcx]): v83 -> v84 @005d [DynRexOp1r_id#4081,%rdi] v43 = band_imm v41, 255 @0062 [DynRexOp1r_ib#40c1,%rdi] v45 = ishl_imm v43, 24 @@ -84,49 +84,49 @@ function u0:2691(i32 [%rdi], i32 [%rsi], i64 vmctx [%r14]) -> i64 uext [%rax] ba @0080 [DynRexOp1r_id#4081,%rdx] v63 = band_imm v61, 255 [DynRexOp1rcmp_ib#7083,%rflags] v98 = ifcmp_imm v63, 26 @0084 [RexOp1rmov#89] regmove v47, %rdi -> %rbx -@0084 [Op1brib#70] brif sge v98, ebb8 -@0084 [-] fallthrough ebb11 +@0084 [Op1brib#70] brif sge v98, block8 +@0084 [-] fallthrough block11 - ebb11: + block11: [DynRexOp1umr#89,%rdx] v103 = copy.i32 v29 -@0088 [Op1jmpb#eb] jump ebb7(v29, v10, v21, v103) +@0088 [Op1jmpb#eb] jump block7(v29, v10, v21, v103) - ebb8: + block8: [DynRexOp1umr#89,%rdx] v104 = copy.i32 v27 @008d [RexOp1rmov#89] regmove v104, %rdx -> %r9 @008d [RexOp1rmov#89] regmove.i32 v27, %rsi -> %rdx -@008d [-] fallthrough ebb7(v104, v10, v21, v27) +@008d [-] fallthrough block7(v104, v10, v21, v27) - ebb7(v67: i32 [%r9], v79: i32 [%rax], v81: i32 [%r8], v87: i32 [%rdx]): + block7(v67: i32 [%r9], v79: i32 [%rax], v81: i32 [%r8], v87: i32 [%rdx]): @0091 [DynRexOp1r_id#4081,%r9] v71 = band_imm v67, 255 @0094 [DynRexOp1r_ib#40c1,%r9] v73 = ishl_imm v71, 24 @0097 [DynRexOp1r_ib#70c1,%r9] v75 = sshr_imm v73, 24 @0098 [DynRexOp1icscc#39,%rbx] v76 = icmp.i32 eq v47, v75 @0098 [Op2urm_noflags_abcd#4b6,%rbx] v77 = bint.i32 v76 @0099 [DynRexOp1rr#21,%r10] v78 = band.i32 v50, v77 -@009a [RexOp1tjccb#74] brz v78, ebb9 -@009a [-] fallthrough ebb12 +@009a [RexOp1tjccb#74] brz v78, block9 +@009a [-] fallthrough block12 - ebb12: + block12: [DynRexOp1umr#89,%rcx] v99 = copy v81 [DynRexOp1umr#89,%rdx] v100 = copy v79 @00a4 [RexOp1rmov#89] regmove v100, %rdx -> %rdi @00a4 [RexOp1rmov#89] regmove v99, %rcx -> %rsi -@00a4 [Op1jmpd#e9] jump ebb3(v100, v99); bin: e9 ffffff2d +@00a4 [Op1jmpd#e9] jump block3(v100, v99); bin: e9 ffffff2d - ebb9: -@00a7 [-] fallthrough ebb4 + block9: +@00a7 [-] fallthrough block4 - ebb4: + block4: @00ad [DynRexOp1r_id#4081,%rcx] v86 = band_imm.i32 v84, 255 @00b3 [DynRexOp1r_id#4081,%rdx] v89 = band_imm.i32 v87, 255 @00b4 [DynRexOp1rr#29,%rcx] v90 = isub v86, v89 -@00b5 [-] fallthrough ebb2(v90) +@00b5 [-] fallthrough block2(v90) - ebb2(v5: i32 [%rcx]): -@00b6 [-] fallthrough ebb1(v5) + block2(v5: i32 [%rcx]): +@00b6 [-] fallthrough block1(v5) - ebb1(v3: i32 [%rcx]): + block1(v3: i32 [%rcx]): @00b6 [Op1umr#89,%rax] v96 = uextend.i64 v3 @00b6 [-] fallthrough_return v96 } diff --git a/cranelift/filetests/filetests/isa/x86/run-const.clif b/cranelift/filetests/filetests/isa/x86/run-const.clif index 1ac5062e49..c39d39adfb 100644 --- a/cranelift/filetests/filetests/isa/x86/run-const.clif +++ b/cranelift/filetests/filetests/isa/x86/run-const.clif @@ -1,7 +1,7 @@ test run function %test_compare_i32() -> b1 { -ebb0: +block0: v0 = iconst.i32 42 v1 = iconst.i32 42 v2 = icmp eq v0, v1 diff --git 
a/cranelift/filetests/filetests/isa/x86/run-i64.clif b/cranelift/filetests/filetests/isa/x86/run-i64.clif index 6fae71966e..ae4a618573 100644 --- a/cranelift/filetests/filetests/isa/x86/run-i64.clif +++ b/cranelift/filetests/filetests/isa/x86/run-i64.clif @@ -3,14 +3,14 @@ test compile target i686 haswell function %iadd(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = iadd v1, v2 ; check: iadd_ifcout return v10 } function %isub(i64, i64) -> i64 { -ebb0(v1: i64, v2: i64): +block0(v1: i64, v2: i64): v10 = isub v1, v2 ; check: isub_ifbout return v10 diff --git a/cranelift/filetests/filetests/isa/x86/saturating-float-cast.clif b/cranelift/filetests/filetests/isa/x86/saturating-float-cast.clif index 5986e1f864..36b69ca8b7 100644 --- a/cranelift/filetests/filetests/isa/x86/saturating-float-cast.clif +++ b/cranelift/filetests/filetests/isa/x86/saturating-float-cast.clif @@ -2,7 +2,7 @@ test compile target x86_64 function u0:0() -> f32 system_v { -ebb0: +block0: v0 = iconst.i8 255 ; check: v2 = iconst.i32 255 ; nextln: v0 = ireduce.i8 v2 diff --git a/cranelift/filetests/filetests/isa/x86/scalar_to_vector-binemit.clif b/cranelift/filetests/filetests/isa/x86/scalar_to_vector-binemit.clif index 112939cd7d..4a02e6bac6 100644 --- a/cranelift/filetests/filetests/isa/x86/scalar_to_vector-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/scalar_to_vector-binemit.clif @@ -4,28 +4,28 @@ set enable_simd target x86_64 function %test_scalar_to_vector_b8() { -ebb0: +block0: [-, %rax] v0 = bconst.b8 true [-, %xmm0] v1 = scalar_to_vector.b8x16 v0 ; bin: 66 0f 6e c0 return } function %test_scalar_to_vector_i16() { -ebb0: +block0: [-, %rbx] v0 = iconst.i16 42 [-, %xmm2] v1 = scalar_to_vector.i16x8 v0 ; bin: 66 0f 6e d3 return } function %test_scalar_to_vector_b32() { -ebb0: +block0: [-, %rcx] v0 = bconst.b32 false [-, %xmm3] v1 = scalar_to_vector.b32x4 v0 ; bin: 66 0f 6e d9 return } function %test_scalar_to_vector_i64() { -ebb0: +block0: [-, %rdx] v0 = iconst.i64 42 [-, %xmm7] v1 = scalar_to_vector.i64x2 v0 ; bin: 66 48 0f 6e fa return diff --git a/cranelift/filetests/filetests/isa/x86/scalar_to_vector-compile.clif b/cranelift/filetests/filetests/isa/x86/scalar_to_vector-compile.clif index 7fa27eecc6..8ab62db59d 100644 --- a/cranelift/filetests/filetests/isa/x86/scalar_to_vector-compile.clif +++ b/cranelift/filetests/filetests/isa/x86/scalar_to_vector-compile.clif @@ -6,13 +6,13 @@ target x86_64 ; ensure that scalar_to_vector emits no instructions for floats (already exist in an XMM register) function %test_scalar_to_vector_f32() -> f32x4 baldrdash_system_v { -ebb0: +block0: v0 = f32const 0x0.42 v1 = scalar_to_vector.f32x4 v0 return v1 } -; check: ebb0 +; check: block0 ; nextln: v2 = iconst.i32 0x3e84_0000 ; nextln: v0 = bitcast.f32 v2 ; nextln: [null_fpr#00,%xmm0] v1 = scalar_to_vector.f32x4 v0 diff --git a/cranelift/filetests/filetests/isa/x86/select-i8.clif b/cranelift/filetests/filetests/isa/x86/select-i8.clif index aac59c1e9c..feec520860 100644 --- a/cranelift/filetests/filetests/isa/x86/select-i8.clif +++ b/cranelift/filetests/filetests/isa/x86/select-i8.clif @@ -2,7 +2,7 @@ test compile target x86_64 function u0:0(b1, i8, i8) -> i8 { -ebb0(v0: b1, v1: i8, v2: i8): +block0(v0: b1, v1: i8, v2: i8): v3 = select v0, v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/x86/shrink-multiple-uses.clif b/cranelift/filetests/filetests/isa/x86/shrink-multiple-uses.clif index 50e2389feb..f896d8cc25 100644 --- 
a/cranelift/filetests/filetests/isa/x86/shrink-multiple-uses.clif +++ b/cranelift/filetests/filetests/isa/x86/shrink-multiple-uses.clif @@ -3,16 +3,16 @@ set opt_level=speed_and_size target x86_64 function %test_multiple_uses(i32 [%rdi]) -> i32 { -ebb0(v0: i32 [%rdi]): +block0(v0: i32 [%rdi]): [DynRexOp1rcmp_ib#7083,%rflags] v3 = ifcmp_imm v0, 0 [Op2seti_abcd#490,%rax] v1 = trueif eq v3 [RexOp2urm_noflags#4b6,%rax] v2 = bint.i32 v1 -[Op1brib#70] brif eq v3, ebb1 -[Op1jmpb#eb] jump ebb2 +[Op1brib#70] brif eq v3, block1 +[Op1jmpb#eb] jump block2 -ebb2: +block2: [Op1ret#c3] return v2 -ebb1: +block1: [Op2trap#40b] trap user0 } diff --git a/cranelift/filetests/filetests/isa/x86/shrink.clif b/cranelift/filetests/filetests/isa/x86/shrink.clif index b0d3174ece..9b0832b2a7 100644 --- a/cranelift/filetests/filetests/isa/x86/shrink.clif +++ b/cranelift/filetests/filetests/isa/x86/shrink.clif @@ -10,7 +10,7 @@ target x86_64 ; function %test_shrinking(i32) -> i32 { -ebb0(v0: i32 [ %rdi ]): +block0(v0: i32 [ %rdi ]): ; asm: movl $0x2,%eax [-,%rcx] v1 = iconst.i32 2 ; bin: b9 00000002 ; asm: subl %ecx,%edi @@ -19,7 +19,7 @@ ebb0(v0: i32 [ %rdi ]): } function %test_not_shrinking(i32) -> i32 { -ebb0(v0: i32 [ %r8 ]): +block0(v0: i32 [ %r8 ]): ; asm: movl $0x2,%eax [-,%rcx] v1 = iconst.i32 2 ; bin: b9 00000002 ; asm: subl %ecx,%edi @@ -28,7 +28,7 @@ ebb0(v0: i32 [ %r8 ]): } function %test_not_shrinking_i8() { -ebb0: +block0: [-,%rsi] v1 = iconst.i8 1 ; asm: movsbl %sil,%esi [-,%rsi] v2 = sextend.i32 v1 ; bin: 40 0f be f6 diff --git a/cranelift/filetests/filetests/isa/x86/shuffle-legalize.clif b/cranelift/filetests/filetests/isa/x86/shuffle-legalize.clif index d192489448..78c6bfef40 100644 --- a/cranelift/filetests/filetests/isa/x86/shuffle-legalize.clif +++ b/cranelift/filetests/filetests/isa/x86/shuffle-legalize.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %test_shuffle_different_ssa_values() -> i8x16 { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = vconst.i8x16 0x01 v2 = shuffle v0, v1, 0x11000000000000000000000000000000 ; pick the second lane of v1, the rest use the first lane of v0 @@ -20,7 +20,7 @@ ebb0: function %test_shuffle_same_ssa_value() -> i8x16 { -ebb0: +block0: v1 = vconst.i8x16 0x01 v2 = shuffle v1, v1, 0x13000000000000000000000000000000 ; pick the fourth lane of v1 and the rest from the first lane of v1 return v2 diff --git a/cranelift/filetests/filetests/isa/x86/shuffle-run.clif b/cranelift/filetests/filetests/isa/x86/shuffle-run.clif index bc9eecb689..44e4998907 100644 --- a/cranelift/filetests/filetests/isa/x86/shuffle-run.clif +++ b/cranelift/filetests/filetests/isa/x86/shuffle-run.clif @@ -2,7 +2,7 @@ test run set enable_simd function %test_shuffle_different_ssa_values() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42] v2 = shuffle v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 31] ; use the first lane of v0 throughout except use the last lane of v1 @@ -15,7 +15,7 @@ ebb0: ; run function %test_shuffle_same_ssa_value() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 0x01000000_00000000_00000000_00000000 ; note where lane 15 is when written with hexadecimal syntax v1 = shuffle v0, v0, 0x0f0f0f0f_0f0f0f0f_0f0f0f0f_0f0f0f0f ; use the last lane of v0 to fill all lanes v2 = extractlane.i8x16 v1, 4 @@ -27,7 +27,7 @@ ebb0: ; run function %compare_shuffle() -> b1 { -ebb0: +block0: v1 = vconst.i32x4 [0 1 2 3] v2 = raw_bitcast.i8x16 v1 ; we have to cast because shuffle is type-limited to Tx16 ; keep each lane in place from the first vector 
@@ -45,7 +45,7 @@ ebb0: function %compare_shuffle() -> b32 { -ebb0: +block0: v1 = vconst.b32x4 [true false true false] v2 = raw_bitcast.b8x16 v1 ; we have to cast because shuffle is type-limited to Tx16 ; pair up the true values to make the entire vector true diff --git a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-binemit.clif b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-binemit.clif index 2994d36146..9f5b4f0080 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-binemit.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %iadd_i32x4() -> b1 { -ebb0: +block0: [-, %xmm0] v0 = vconst.i32x4 [1 1 1 1] [-, %xmm1] v1 = vconst.i32x4 [1 2 3 4] [-, %xmm0] v2 = iadd v0, v1 ; bin: 66 0f fe c1 @@ -19,7 +19,7 @@ ebb0: } function %iadd_i8x16_with_overflow() -> b1 { -ebb0: +block0: [-, %xmm0] v0 = vconst.i8x16 [255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255] [-, %xmm7] v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] [-, %xmm0] v2 = iadd v0, v1 ; bin: 66 0f fc c7 @@ -31,19 +31,19 @@ ebb0: } function %iadd_i16x8(i16x8, i16x8) -> i16x8 { -ebb0(v0: i16x8 [%xmm1], v1: i16x8 [%xmm2]): +block0(v0: i16x8 [%xmm1], v1: i16x8 [%xmm2]): [-, %xmm1] v2 = iadd v0, v1 ; bin: 66 0f fd ca return v2 } function %iadd_i64x2(i64x2, i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm3], v1: i64x2 [%xmm4]): +block0(v0: i64x2 [%xmm3], v1: i64x2 [%xmm4]): [-, %xmm3] v2 = iadd v0, v1 ; bin: 66 0f d4 dc return v2 } function %isub_i32x4() -> b1 { -ebb0: +block0: [-, %xmm3] v0 = vconst.i32x4 [1 1 1 1] [-, %xmm5] v1 = vconst.i32x4 [1 2 3 4] [-, %xmm3] v2 = isub v0, v1 ; bin: 66 0f fa dd @@ -59,25 +59,25 @@ ebb0: } function %isub_i64x2(i64x2, i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm0], v1: i64x2 [%xmm1]): +block0(v0: i64x2 [%xmm0], v1: i64x2 [%xmm1]): [-, %xmm0] v2 = isub v0, v1 ; bin: 66 0f fb c1 return v2 } function %isub_i16x8(i16x8, i16x8) -> i16x8 { -ebb0(v0: i16x8 [%xmm3], v1: i16x8 [%xmm4]): +block0(v0: i16x8 [%xmm3], v1: i16x8 [%xmm4]): [-, %xmm3] v2 = isub v0, v1 ; bin: 66 0f f9 dc return v2 } function %isub_i8x16(i8x16, i8x16) -> i8x16 { -ebb0(v0: i8x16 [%xmm3], v1: i8x16 [%xmm4]): +block0(v0: i8x16 [%xmm3], v1: i8x16 [%xmm4]): [-, %xmm3] v2 = isub v0, v1 ; bin: 66 0f f8 dc return v2 } function %imul_i32x4() -> b1 { -ebb0: +block0: [-, %xmm0] v0 = vconst.i32x4 [-1 0 1 -2147483647] ; e.g. -2147483647 == 0x80_00_00_01 [-, %xmm1] v1 = vconst.i32x4 [2 2 2 2] [-, %xmm0] v2 = imul v0, v1 ; bin: 66 0f 38 40 c1 @@ -98,7 +98,7 @@ ebb0: function %imul_i16x8() -> b1 { -ebb0: +block0: [-, %xmm1] v0 = vconst.i16x8 [-1 0 1 32767 0 0 0 0] ; e.g. 
32767 == 0x7f_ff [-, %xmm2] v1 = vconst.i16x8 [2 2 2 2 0 0 0 0] [-, %xmm1] v2 = imul v0, v1 ; bin: 66 0f d5 ca @@ -121,7 +121,7 @@ ebb0: function %sadd_sat_i8x16() -> b1 { -ebb0: +block0: [-, %xmm2] v0 = vconst.i8x16 [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] [-, %xmm3] v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -134,7 +134,7 @@ ebb0: function %uadd_sat_i16x8() -> b1 { -ebb0: +block0: [-, %xmm2] v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0] [-, %xmm3] v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] @@ -147,7 +147,7 @@ ebb0: function %sub_sat_i8x16() -> b1 { -ebb0: +block0: [-, %xmm2] v0 = vconst.i8x16 [128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] ; 128 == 0x80 == -128 [-, %xmm3] v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -166,7 +166,7 @@ ebb0: function %sub_sat_i16x8() { -ebb0: +block0: [-, %xmm3] v0 = vconst.i16x8 [0 0 0 0 0 0 0 0] [-, %xmm5] v1 = vconst.i16x8 [1 1 1 1 1 1 1 1] [-, %xmm3] v2 = ssub_sat v0, v1 ; bin: 66 0f e9 dd @@ -175,7 +175,7 @@ ebb0: } function %float_arithmetic_f32x4(f32x4, f32x4) { -ebb0(v0: f32x4 [%xmm3], v1: f32x4 [%xmm5]): +block0(v0: f32x4 [%xmm3], v1: f32x4 [%xmm5]): [-, %xmm3] v2 = fadd v0, v1 ; bin: 40 0f 58 dd [-, %xmm3] v3 = fsub v0, v1 ; bin: 40 0f 5c dd [-, %xmm3] v4 = fmul v0, v1 ; bin: 40 0f 59 dd @@ -187,7 +187,7 @@ ebb0(v0: f32x4 [%xmm3], v1: f32x4 [%xmm5]): } function %float_arithmetic_f64x2(f64x2, f64x2) { -ebb0(v0: f64x2 [%xmm3], v1: f64x2 [%xmm5]): +block0(v0: f64x2 [%xmm3], v1: f64x2 [%xmm5]): [-, %xmm3] v2 = fadd v0, v1 ; bin: 66 40 0f 58 dd [-, %xmm3] v3 = fsub v0, v1 ; bin: 66 40 0f 5c dd [-, %xmm3] v4 = fmul v0, v1 ; bin: 66 40 0f 59 dd diff --git a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-legalize.clif b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-legalize.clif index b6b033833e..5211e1d796 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-legalize.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-legalize.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %ineg_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [1 1 1 1] v2 = ineg v0 ; check: v5 = vconst.i32x4 0x00 @@ -16,7 +16,7 @@ ebb0: } function %ineg_legalized() { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = ineg v0 ; check: v6 = vconst.i8x16 0x00 @@ -36,7 +36,7 @@ ebb0: } function %fneg_legalized() { -ebb0: +block0: v0 = vconst.f32x4 [0x1.0 0x2.0 0x3.0 0x4.0] v1 = fneg v0 ; check: v4 = vconst.i32x4 0xffffffffffffffffffffffffffffffff @@ -55,7 +55,7 @@ ebb0: } function %fabs_legalized() { -ebb0: +block0: v0 = vconst.f64x2 [0x1.0 -0x2.0] v1 = fabs v0 ; check: v2 = vconst.i64x2 0xffffffffffffffffffffffffffffffff diff --git a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-run.clif b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-run.clif index 429928b213..971f5c9bdb 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-arithmetic-run.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-arithmetic-run.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %iadd_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [1 1 1 1] v1 = vconst.i32x4 [1 2 3 4] v2 = iadd v0, v1 @@ -21,7 +21,7 @@ ebb0: ; run function %iadd_i8x16_with_overflow() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255] v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] v2 = iadd v0, v1 @@ -35,7 +35,7 @@ ebb0: ; run function %isub_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [1 1 1 1] v1 = vconst.i32x4 [1 2 3 4] v2 = isub v0, v1 @@ -54,7 +54,7 @@ ebb0: function %ineg_i32x4() -> b1 { 
-ebb0: +block0: v0 = vconst.i32x4 [1 1 1 1] v2 = ineg v0 @@ -66,7 +66,7 @@ ebb0: ; run function %imul_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [-1 0 1 -2147483647] ; e.g. -2147483647 == 0x80_00_00_01 v1 = vconst.i32x4 [2 2 2 2] v2 = imul v0, v1 @@ -87,7 +87,7 @@ ebb0: ; run function %imul_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 0 1 32767 0 0 0 0] ; e.g. 32767 == 0x7f_ff v1 = vconst.i16x8 [2 2 2 2 0 0 0 0] v2 = imul v0, v1 @@ -110,7 +110,7 @@ ebb0: ; run function %sadd_sat_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -123,7 +123,7 @@ ebb0: ; run function %uadd_sat_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0] v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] @@ -136,7 +136,7 @@ ebb0: ; run function %sub_sat_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] ; 128 == 0x80 == -128 v1 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -155,7 +155,7 @@ ebb0: ; run function %add_sub_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0x4.2 0.0 0.0 0.0] v1 = vconst.f32x4 [0x1.0 0x1.0 0x1.0 0x1.0] v2 = vconst.f32x4 [0x5.2 0x1.0 0x1.0 0x1.0] @@ -173,7 +173,7 @@ ebb0: ; run function %mul_div_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0x4.2 -0x2.1 0x2.0 0.0] v1 = vconst.f32x4 [0x3.4 0x6.7 0x8.9 0xa.b] v2 = vconst.f32x4 [0xd.68 -0xd.47 0x11.2 0x0.0] @@ -191,7 +191,7 @@ ebb0: ; run function %sqrt_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [0x9.0 0x1.0] v1 = sqrt v0 v2 = vconst.f64x2 [0x3.0 0x1.0] @@ -202,7 +202,7 @@ ebb0: ; run function %fmax_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [-0.0 -0x1.0] v1 = vconst.f64x2 [+0.0 +0x1.0] @@ -215,7 +215,7 @@ ebb0: ; run function %fmin_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [-0x1.0 -0x1.0] v1 = vconst.f64x2 [+0.0 +0x1.0] @@ -228,7 +228,7 @@ ebb0: ; run function %fneg_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [0x1.0 -0x1.0] v1 = fneg v0 @@ -241,7 +241,7 @@ ebb0: ; run function %fneg_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0x0.0 -0x0.0 -Inf Inf] v1 = fneg v0 @@ -254,7 +254,7 @@ ebb0: ; run function %fabs_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0x0.0 -0x1.0 0x2.0 -0x3.0] v1 = fabs v0 diff --git a/cranelift/filetests/filetests/isa/x86/simd-bitwise-binemit.clif b/cranelift/filetests/filetests/isa/x86/simd-bitwise-binemit.clif index af8796863c..599c58fd80 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-bitwise-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-bitwise-binemit.clif @@ -3,103 +3,103 @@ set enable_simd target x86_64 skylake function %ishl_i16x8(i16x8, i64x2) -> i16x8 { -ebb0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): +block0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): [-, %xmm2] v2 = x86_psll v0, v1 ; bin: 66 0f f1 d1 return v2 } function %ishl_i32x4(i32x4, i64x2) -> i32x4 { -ebb0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]): +block0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]): [-, %xmm4] v2 = x86_psll v0, v1 ; bin: 66 0f f2 e0 return v2 } function %ishl_i64x2(i64x2, i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]): +block0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]): [-, %xmm6] v2 = x86_psll v0, v1 ; bin: 66 0f f3 f3 return v2 } function %ushr_i16x8(i16x8, i64x2) -> i16x8 { -ebb0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): +block0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): [-, %xmm2] v2 = x86_psrl v0, v1 ; bin: 66 0f d1 d1 return v2 } function %ushr_i32x4(i32x4, i64x2) -> i32x4 { -ebb0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]): +block0(v0: 
i32x4 [%xmm4], v1: i64x2 [%xmm0]): [-, %xmm4] v2 = x86_psrl v0, v1 ; bin: 66 0f d2 e0 return v2 } function %ushr_i64x2(i64x2, i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]): +block0(v0: i64x2 [%xmm6], v1: i64x2 [%xmm3]): [-, %xmm6] v2 = x86_psrl v0, v1 ; bin: 66 0f d3 f3 return v2 } function %sshr_i16x8(i16x8, i64x2) -> i16x8 { -ebb0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): +block0(v0: i16x8 [%xmm2], v1: i64x2 [%xmm1]): [-, %xmm2] v2 = x86_psra v0, v1 ; bin: 66 0f e1 d1 return v2 } function %sshr_i32x4(i32x4, i64x2) -> i32x4 { -ebb0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]): +block0(v0: i32x4 [%xmm4], v1: i64x2 [%xmm0]): [-, %xmm4] v2 = x86_psra v0, v1 ; bin: 66 0f e2 e0 return v2 } function %ishl_imm_i16x8(i16x8) -> i16x8 { -ebb0(v0: i16x8 [%xmm2]): +block0(v0: i16x8 [%xmm2]): [-, %xmm2] v2 = ishl_imm v0, 3 ; bin: 66 0f 71 f2 03 return v2 } function %ishl_imm_i32x4(i32x4) -> i32x4 { -ebb0(v0: i32x4 [%xmm4]): +block0(v0: i32x4 [%xmm4]): [-, %xmm4] v2 = ishl_imm v0, 10 ; bin: 66 0f 72 f4 0a return v2 } function %ishl_imm_i64x2(i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm6]): +block0(v0: i64x2 [%xmm6]): [-, %xmm6] v2 = ishl_imm v0, 42 ; bin: 66 0f 73 f6 2a return v2 } function %ushr_imm_i16x8(i16x8) -> i16x8 { -ebb0(v0: i16x8 [%xmm2]): +block0(v0: i16x8 [%xmm2]): [-, %xmm2] v2 = ushr_imm v0, 3 ; bin: 66 0f 71 d2 03 return v2 } function %ushr_imm_i32x4(i32x4) -> i32x4 { -ebb0(v0: i32x4 [%xmm4]): +block0(v0: i32x4 [%xmm4]): [-, %xmm4] v2 = ushr_imm v0, 10 ; bin: 66 0f 72 d4 0a return v2 } function %ushr_imm_i64x2(i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm6]): +block0(v0: i64x2 [%xmm6]): [-, %xmm6] v2 = ushr_imm v0, 42 ; bin: 66 0f 73 d6 2a return v2 } function %sshr_imm_i16x8(i16x8) -> i16x8 { -ebb0(v0: i16x8 [%xmm2]): +block0(v0: i16x8 [%xmm2]): [-, %xmm2] v2 = sshr_imm v0, 3 ; bin: 66 0f 71 e2 03 return v2 } function %sshr_imm_i32x4(i32x4) -> i32x4 { -ebb0(v0: i32x4 [%xmm4]): +block0(v0: i32x4 [%xmm4]): [-, %xmm4] v2 = sshr_imm v0, 10 ; bin: 66 0f 72 e4 0a return v2 } function %sshr_imm_i64x2(i64x2) -> i64x2 { -ebb0(v0: i64x2 [%xmm6]): +block0(v0: i64x2 [%xmm6]): [-, %xmm6] v2 = sshr_imm v0, 42 ; bin: 66 0f 73 e6 2a return v2 } diff --git a/cranelift/filetests/filetests/isa/x86/simd-bitwise-legalize.clif b/cranelift/filetests/filetests/isa/x86/simd-bitwise-legalize.clif index e8391c8a73..af7036b27a 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-bitwise-legalize.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-bitwise-legalize.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %ishl_i32x4() -> i32x4 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i32x4 [1 2 4 8] v2 = ishl v1, v0 @@ -13,7 +13,7 @@ ebb0: } function %ushr_i64x2() -> i64x2 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i64x2 [1 2] v2 = ushr v1, v0 @@ -23,7 +23,7 @@ ebb0: } function %sshr_i16x8() -> i16x8 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i16x8 [1 2 4 8 16 32 64 128] v2 = sshr v1, v0 @@ -33,7 +33,7 @@ ebb0: } function %bitselect_i16x8() -> i16x8 { -ebb0: +block0: v0 = vconst.i16x8 [0 0 0 0 0 0 0 0] v1 = vconst.i16x8 [0 0 0 0 0 0 0 0] v2 = vconst.i16x8 [0 0 0 0 0 0 0 0] diff --git a/cranelift/filetests/filetests/isa/x86/simd-bitwise-run.clif b/cranelift/filetests/filetests/isa/x86/simd-bitwise-run.clif index 8ca92a756f..670c501c9b 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-bitwise-run.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-bitwise-run.clif @@ -5,7 +5,7 @@ target x86_64 skylake ; TODO: once available, replace all lane extraction with `icmp + all_ones` 
function %ishl_i32x4() -> b1 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i32x4 [1 2 4 8] v2 = ishl v1, v0 @@ -22,7 +22,7 @@ ebb0: ; run function %ishl_too_large_i16x8() -> b1 { -ebb0: +block0: v0 = iconst.i32 17 ; note that this will shift off the end of each lane v1 = vconst.i16x8 [1 2 4 8 16 32 64 128] v2 = ishl v1, v0 @@ -39,7 +39,7 @@ ebb0: ; run function %ushr_i64x2() -> b1 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i64x2 [1 2] v2 = ushr v1, v0 @@ -56,7 +56,7 @@ ebb0: ; run function %ushr_too_large_i32x4() -> b1 { -ebb0: +block0: v0 = iconst.i32 33 ; note that this will shift off the end of each lane v1 = vconst.i32x4 [1 2 4 8] v2 = ushr v1, v0 @@ -73,7 +73,7 @@ ebb0: ; run function %sshr_i16x8() -> b1 { -ebb0: +block0: v0 = iconst.i32 1 v1 = vconst.i16x8 [-1 2 4 8 -16 32 64 128] v2 = sshr v1, v0 @@ -90,7 +90,7 @@ ebb0: ; run function %sshr_too_large_i32x4() -> b1 { -ebb0: +block0: v0 = iconst.i32 33 ; note that this will shift off the end of each lane v1 = vconst.i32x4 [1 2 4 -8] v2 = sshr v1, v0 @@ -107,7 +107,7 @@ ebb0: ; run function %bitselect_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 255] ; the selector vector v1 = vconst.i8x16 [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42] ; for each 1-bit in v0 the bit of v1 is selected v2 = vconst.i8x16 [42 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127] ; for each 0-bit in v0 the bit of v2 is selected @@ -129,7 +129,7 @@ ebb0: ; run function %sshr_imm_i32x4() -> b1 { -ebb0: +block0: v1 = vconst.i32x4 [1 2 4 -8] v2 = sshr_imm v1, 1 @@ -141,7 +141,7 @@ ebb0: ; run function %sshr_imm_i16x8() -> b1 { -ebb0: +block0: v1 = vconst.i16x8 [1 2 4 -8 0 0 0 0] v2 = ushr_imm v1, 1 @@ -153,7 +153,7 @@ ebb0: ; run function %ishl_imm_i64x2() -> b1 { -ebb0: +block0: v1 = vconst.i64x2 [1 0] v2 = ishl_imm v1, 1 diff --git a/cranelift/filetests/filetests/isa/x86/simd-comparison-binemit.clif b/cranelift/filetests/filetests/isa/x86/simd-comparison-binemit.clif index 722e705a85..053b50a9f3 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-comparison-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-comparison-binemit.clif @@ -3,31 +3,31 @@ set enable_simd target x86_64 skylake function %icmp_sgt_i8x16(i8x16, i8x16) -> b8x16 { -ebb0(v0: i8x16 [%xmm2], v1: i8x16 [%xmm1]): +block0(v0: i8x16 [%xmm2], v1: i8x16 [%xmm1]): [-, %xmm2] v2 = icmp sgt v0, v1 ; bin: 66 0f 64 d1 return v2 } function %icmp_sgt_i16x8(i16x8, i16x8) -> b16x8 { -ebb0(v0: i16x8 [%xmm4], v1: i16x8 [%xmm3]): +block0(v0: i16x8 [%xmm4], v1: i16x8 [%xmm3]): [-, %xmm4] v2 = icmp sgt v0, v1 ; bin: 66 0f 65 e3 return v2 } function %icmp_sgt_i32x4(i32x4, i32x4) -> b32x4 { -ebb0(v0: i32x4 [%xmm6], v1: i32x4 [%xmm5]): +block0(v0: i32x4 [%xmm6], v1: i32x4 [%xmm5]): [-, %xmm6] v2 = icmp sgt v0, v1 ; bin: 66 0f 66 f5 return v2 } function %icmp_sgt_i64x2(i64x2, i64x2) -> b64x2 { -ebb0(v0: i64x2 [%xmm0], v1: i64x2 [%xmm7]): +block0(v0: i64x2 [%xmm0], v1: i64x2 [%xmm7]): [-, %xmm0] v2 = icmp sgt v0, v1 ; bin: 66 0f 38 37 c7 return v2 } function %min_max_i8x16(i8x16, i8x16) { -ebb0(v0: i8x16 [%xmm3], v1: i8x16 [%xmm1]): +block0(v0: i8x16 [%xmm3], v1: i8x16 [%xmm1]): [-, %xmm3] v2 = x86_pmaxs v0, v1 ; bin: 66 0f 38 3c d9 [-, %xmm3] v3 = x86_pmaxu v0, v1 ; bin: 66 0f de d9 [-, %xmm3] v4 = x86_pmins v0, v1 ; bin: 66 0f 38 38 d9 @@ -36,7 +36,7 @@ ebb0(v0: i8x16 [%xmm3], v1: i8x16 [%xmm1]): } function %min_max_i16x8(i16x8, i16x8) { -ebb0(v0: i16x8 [%xmm2], v1: i16x8 [%xmm5]): +block0(v0: i16x8 [%xmm2], v1: i16x8 [%xmm5]): [-, %xmm2] v2 = x86_pmaxs v0, v1 ; bin: 66 0f ee d5 [-, 
%xmm2] v3 = x86_pmaxu v0, v1 ; bin: 66 0f 38 3e d5 [-, %xmm2] v4 = x86_pmins v0, v1 ; bin: 66 0f ea d5 @@ -45,7 +45,7 @@ ebb0(v0: i16x8 [%xmm2], v1: i16x8 [%xmm5]): } function %min_max_i32x4(i32x4, i32x4) { -ebb0(v0: i32x4 [%xmm2], v1: i32x4 [%xmm4]): +block0(v0: i32x4 [%xmm2], v1: i32x4 [%xmm4]): [-, %xmm2] v2 = x86_pmaxs v0, v1 ; bin: 66 0f 38 3d d4 [-, %xmm2] v3 = x86_pmaxu v0, v1 ; bin: 66 0f 38 3f d4 [-, %xmm2] v4 = x86_pmins v0, v1 ; bin: 66 0f 38 39 d4 @@ -54,7 +54,7 @@ ebb0(v0: i32x4 [%xmm2], v1: i32x4 [%xmm4]): } function %fcmp_f32x4(f32x4, f32x4) { -ebb0(v0: f32x4 [%xmm2], v1: f32x4 [%xmm4]): +block0(v0: f32x4 [%xmm2], v1: f32x4 [%xmm4]): [-, %xmm2] v2 = fcmp eq v0, v1 ; bin: 40 0f c2 d4 00 [-, %xmm2] v3 = fcmp lt v0, v1 ; bin: 40 0f c2 d4 01 [-, %xmm2] v4 = fcmp le v0, v1 ; bin: 40 0f c2 d4 02 @@ -67,7 +67,7 @@ ebb0(v0: f32x4 [%xmm2], v1: f32x4 [%xmm4]): } function %fcmp_f64x2(f64x2, f64x2) { -ebb0(v0: f64x2 [%xmm2], v1: f64x2 [%xmm0]): +block0(v0: f64x2 [%xmm2], v1: f64x2 [%xmm0]): [-, %xmm2] v2 = fcmp eq v0, v1 ; bin: 66 40 0f c2 d0 00 [-, %xmm2] v3 = fcmp lt v0, v1 ; bin: 66 40 0f c2 d0 01 [-, %xmm2] v4 = fcmp le v0, v1 ; bin: 66 40 0f c2 d0 02 diff --git a/cranelift/filetests/filetests/isa/x86/simd-comparison-legalize.clif b/cranelift/filetests/filetests/isa/x86/simd-comparison-legalize.clif index acbff943eb..61888ccb6d 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-comparison-legalize.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-comparison-legalize.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 { -ebb0(v0: i32x4, v1: i32x4): +block0(v0: i32x4, v1: i32x4): v2 = icmp ne v0, v1 ; check: v3 = icmp eq v0, v1 ; nextln: v4 = vconst.b32x4 0xffffffffffffffffffffffffffffffff @@ -12,7 +12,7 @@ ebb0(v0: i32x4, v1: i32x4): } function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 { -ebb0(v0: i32x4, v1: i32x4): +block0(v0: i32x4, v1: i32x4): v2 = icmp ugt v0, v1 ; check: v3 = x86_pmaxu v0, v1 ; nextln: v4 = icmp eq v3, v1 @@ -22,7 +22,7 @@ ebb0(v0: i32x4, v1: i32x4): } function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 { -ebb0(v0: i16x8, v1: i16x8): +block0(v0: i16x8, v1: i16x8): v2 = icmp sge v0, v1 ; check: v3 = x86_pmins v0, v1 ; nextln: v2 = icmp eq v3, v1 @@ -30,7 +30,7 @@ ebb0(v0: i16x8, v1: i16x8): } function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 { -ebb0(v0: i8x16, v1: i8x16): +block0(v0: i8x16, v1: i8x16): v2 = icmp uge v0, v1 ; check: v3 = x86_pminu v0, v1 ; nextln: v2 = icmp eq v3, v1 diff --git a/cranelift/filetests/filetests/isa/x86/simd-comparison-run.clif b/cranelift/filetests/filetests/isa/x86/simd-comparison-run.clif index 444d4e28bd..7cbd285860 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-comparison-run.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-comparison-run.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %icmp_eq_i8x16() -> b8 { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = vconst.i8x16 0x00 v2 = icmp eq v0, v1 @@ -13,7 +13,7 @@ ebb0: ; run function %icmp_eq_i64x2() -> b64 { -ebb0: +block0: v0 = vconst.i64x2 0xffffffffffffffffffffffffffffffff v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff v2 = icmp eq v0, v1 @@ -23,7 +23,7 @@ ebb0: ; run function %icmp_ne_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [0 1 2 3] v1 = vconst.i32x4 [7 7 7 7] v2 = icmp ne v0, v1 @@ -33,7 +33,7 @@ ebb0: ; run function %icmp_ne_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [0 1 2 3 4 5 6 7] v1 = vconst.i16x8 [0 1 2 3 4 5 6 7] v2 = icmp ne v0, v1 @@ -45,7 +45,7 @@ ebb0: ; 
run function %icmp_sgt_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0] v1 = vconst.i8x16 [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0xff] v2 = icmp sgt v0, v1 @@ -58,7 +58,7 @@ ebb0: ; run function %icmp_sgt_i64x2() -> b1 { -ebb0: +block0: v0 = vconst.i64x2 [0 -42] v1 = vconst.i64x2 [-1 -43] v2 = icmp sgt v0, v1 @@ -68,7 +68,7 @@ ebb0: ; run function %maxs_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 will be greater than -1 == 0xff with ; signed max v1 = vconst.i8x16 [0xff 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -79,7 +79,7 @@ ebb0: ; run function %maxu_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [0 1 1 1 1 1 1 1] v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] ; -1 == 0xffff will be greater with unsigned max v2 = x86_pmaxu v0, v1 @@ -89,7 +89,7 @@ ebb0: ; run function %mins_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [0 1 1 1] v1 = vconst.i32x4 [-1 1 1 1] ; -1 == 0xffffffff will be less with signed min v2 = x86_pmins v0, v1 @@ -99,7 +99,7 @@ ebb0: ; run function %minu_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] ; 1 < 2 with unsigned min v1 = vconst.i8x16 [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] v2 = x86_pminu v0, v1 @@ -109,7 +109,7 @@ ebb0: ; run function %icmp_ugt_i8x16() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] v1 = vconst.i8x16 [0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] v2 = icmp ugt v0, v1 @@ -119,7 +119,7 @@ ebb0: ; run function %icmp_sge_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 1 2 3 4 5 6 7] v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] v2 = icmp sge v0, v1 @@ -129,7 +129,7 @@ ebb0: ; run function %icmp_uge_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [1 2 3 4] v1 = vconst.i32x4 [1 1 1 1] v2 = icmp uge v0, v1 @@ -139,7 +139,7 @@ ebb0: ; run function %icmp_slt_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [-1 1 1 1] v1 = vconst.i32x4 [1 2 3 4] v2 = icmp slt v0, v1 @@ -149,7 +149,7 @@ ebb0: ; run function %icmp_ult_i32x4() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 [1 1 1 1] v1 = vconst.i32x4 [-1 2 3 4] ; -1 = 0xffff...
will be greater than 1 when unsigned v2 = icmp ult v0, v1 @@ -160,7 +160,7 @@ ebb0: function %icmp_ult_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1] v1 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1] v2 = icmp ult v0, v1 @@ -173,7 +173,7 @@ ebb0: ; run function %icmp_sle_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 -1 0 0 0 0 0 0] v1 = vconst.i16x8 [-1 0 0 0 0 0 0 0] v2 = icmp sle v0, v1 @@ -183,7 +183,7 @@ ebb0: ; run function %icmp_ule_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0] v1 = vconst.i16x8 [-1 -1 0 0 0 0 0 0] v2 = icmp ule v0, v1 @@ -193,7 +193,7 @@ ebb0: ; run function %fcmp_eq_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0] v1 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0] v2 = fcmp eq v0, v1 @@ -203,7 +203,7 @@ ebb0: ; run function %fcmp_lt_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [0.0 -0x4.2 0x0.0 -0.0] v1 = vconst.f32x4 [0x0.001 0x4.2 0x0.33333 0x1.0] v2 = fcmp lt v0, v1 @@ -213,7 +213,7 @@ ebb0: ; run function %fcmp_ge_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [0x0.0 0x4.2] v1 = vconst.f64x2 [0.0 0x4.1] v2 = fcmp ge v0, v1 @@ -223,7 +223,7 @@ ebb0: ; run function %fcmp_uno_f64x2() -> b1 { -ebb0: +block0: v0 = vconst.f64x2 [0.0 NaN] v1 = vconst.f64x2 [NaN 0x4.1] v2 = fcmp uno v0, v1 @@ -233,7 +233,7 @@ ebb0: ; run function %fcmp_gt_nans_f32x4() -> b1 { -ebb0: +block0: v0 = vconst.f32x4 [NaN 0x42.0 -NaN NaN] v1 = vconst.f32x4 [NaN NaN 0x42.0 Inf] v2 = fcmp gt v0, v1 diff --git a/cranelift/filetests/filetests/isa/x86/simd-construction-run.clif b/cranelift/filetests/filetests/isa/x86/simd-construction-run.clif index eecd82e024..ef2aeea26d 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-construction-run.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-construction-run.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %splat_i64x2() -> b1 { -ebb0: +block0: v0 = iconst.i64 -1 v1 = splat.i64x2 v0 v2 = vconst.i64x2 [-1 -1] diff --git a/cranelift/filetests/filetests/isa/x86/simd-logical-binemit.clif b/cranelift/filetests/filetests/isa/x86/simd-logical-binemit.clif index 6d6a3fac31..2f7c4f5b22 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-logical-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-logical-binemit.clif @@ -3,31 +3,31 @@ set enable_simd target x86_64 skylake function %bor_b16x8(b16x8, b16x8) -> b16x8 { -ebb0(v0: b16x8 [%xmm2], v1: b16x8 [%xmm1]): +block0(v0: b16x8 [%xmm2], v1: b16x8 [%xmm1]): [-, %xmm2] v2 = bor v0, v1 ; bin: 66 0f eb d1 return v2 } function %band_b64x2(b64x2, b64x2) -> b64x2 { -ebb0(v0: b64x2 [%xmm6], v1: b64x2 [%xmm3]): +block0(v0: b64x2 [%xmm6], v1: b64x2 [%xmm3]): [-, %xmm6] v2 = band v0, v1 ; bin: 66 0f db f3 return v2 } function %bxor_b32x4(b32x4, b32x4) -> b32x4 { -ebb0(v0: b32x4 [%xmm4], v1: b32x4 [%xmm0]): +block0(v0: b32x4 [%xmm4], v1: b32x4 [%xmm0]): [-, %xmm4] v2 = bxor v0, v1 ; bin: 66 0f ef e0 return v2 } function %band_not_b64x2(b64x2, b64x2) -> b64x2 { -ebb0(v0: b64x2 [%xmm6], v1: b64x2 [%xmm3]): +block0(v0: b64x2 [%xmm6], v1: b64x2 [%xmm3]): [-, %xmm3] v2 = band_not v0, v1 ; bin: 66 0f df de return v2 } function %x86_ptest_f64x2(f64x2, f64x2) { -ebb0(v0: f64x2 [%xmm0], v1: f64x2 [%xmm2]): +block0(v0: f64x2 [%xmm0], v1: f64x2 [%xmm2]): [-, %rflags] v2 = x86_ptest v0, v1 ; bin: 66 0f 38 17 c2 return } diff --git a/cranelift/filetests/filetests/isa/x86/simd-logical-legalize.clif b/cranelift/filetests/filetests/isa/x86/simd-logical-legalize.clif index 2e13f79b9b..a1248c8bba 100644 --- 
a/cranelift/filetests/filetests/isa/x86/simd-logical-legalize.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-logical-legalize.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %bnot_b32x4(b32x4) -> b32x4 { -ebb0(v0: b32x4): +block0(v0: b32x4): v1 = bnot v0 ; check: v2 = vconst.b32x4 0xffffffffffffffffffffffffffffffff ; nextln: v1 = bxor v2, v0 @@ -11,7 +11,7 @@ ebb0(v0: b32x4): } function %vany_true_b32x4(b32x4) -> b1 { -ebb0(v0: b32x4): +block0(v0: b32x4): v1 = vany_true v0 ; check: v2 = x86_ptest v0, v0 ; nextln: v1 = trueif ne v2 @@ -19,7 +19,7 @@ ebb0(v0: b32x4): } function %vall_true_i64x2(i64x2) -> b1 { -ebb0(v0: i64x2): +block0(v0: i64x2): v1 = vall_true v0 ; check: v2 = vconst.i64x2 0x00 ; nextln: v3 = icmp eq v0, v2 diff --git a/cranelift/filetests/filetests/isa/x86/simd-logical-rodata.clif b/cranelift/filetests/filetests/isa/x86/simd-logical-rodata.clif index 1c5fb89733..619d300bfe 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-logical-rodata.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-logical-rodata.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %bnot_b32x4(b32x4) -> b32x4 { -ebb0(v0: b32x4): +block0(v0: b32x4): v1 = bnot v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x86/simd-logical-run.clif b/cranelift/filetests/filetests/isa/x86/simd-logical-run.clif index 9b525f2e10..35fc44bc6a 100644 --- a/cranelift/filetests/filetests/isa/x86/simd-logical-run.clif +++ b/cranelift/filetests/filetests/isa/x86/simd-logical-run.clif @@ -3,7 +3,7 @@ set enable_simd target x86_64 skylake function %bnot() -> b32 { -ebb0: +block0: v0 = vconst.b32x4 [true true true false] v1 = bnot v0 v2 = extractlane v1, 3 @@ -12,7 +12,7 @@ ebb0: ; run function %band_not() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [1 0 0 0 0 0 0 0] v1 = vconst.i16x8 [0 0 0 0 0 0 0 0] v2 = band_not v0, v1 @@ -23,7 +23,7 @@ ebb0: ; run function %vany_true_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [1 0 0 0 0 0 0 0] v1 = vany_true v0 return v1 @@ -31,7 +31,7 @@ ebb0: ; run function %vany_true_b32x4() -> b1 { -ebb0: +block0: v0 = vconst.b32x4 [false false false false] v1 = vany_true v0 v2 = bint.i32 v1 @@ -41,7 +41,7 @@ ebb0: ; run function %vall_true_i16x8() -> b1 { -ebb0: +block0: v0 = vconst.i16x8 [1 0 0 0 0 0 0 0] v1 = vall_true v0 v2 = bint.i32 v1 @@ -51,7 +51,7 @@ ebb0: ; run function %vall_true_b32x4() -> b1 { -ebb0: +block0: v0 = vconst.b32x4 [true true true true] v1 = vall_true v0 return v1 diff --git a/cranelift/filetests/filetests/isa/x86/stack-addr64.clif b/cranelift/filetests/filetests/isa/x86/stack-addr64.clif index c80d190907..bcb441cd6b 100644 --- a/cranelift/filetests/filetests/isa/x86/stack-addr64.clif +++ b/cranelift/filetests/filetests/isa/x86/stack-addr64.clif @@ -16,7 +16,7 @@ function %stack_addr() { ss4 = explicit_slot 8, offset 0 ss5 = explicit_slot 8, offset 1024 -ebb0: +block0: [-,%rcx] v0 = stack_addr.i64 ss0 ; bin: 48 8d 8c 24 00000808 [-,%rcx] v1 = stack_addr.i64 ss1 ; bin: 48 8d 8c 24 00000408 [-,%rcx] v2 = stack_addr.i64 ss2 ; bin: 48 8d 8c 24 00000008 diff --git a/cranelift/filetests/filetests/isa/x86/stack-load-store64.clif b/cranelift/filetests/filetests/isa/x86/stack-load-store64.clif index 3c0e2c8c0e..a74a1dfc32 100644 --- a/cranelift/filetests/filetests/isa/x86/stack-load-store64.clif +++ b/cranelift/filetests/filetests/isa/x86/stack-load-store64.clif @@ -6,7 +6,7 @@ target x86_64 haswell function %stack_load_and_store() { ss0 = explicit_slot 8, offset 0 -ebb0: +block0: v0 = stack_load.i64 ss0 ; check: v1 = 
stack_addr.i64 ss0 diff --git a/cranelift/filetests/filetests/isa/x86/stack-load-store8.clif b/cranelift/filetests/filetests/isa/x86/stack-load-store8.clif index 2c368f6dfc..2c5bb1553b 100644 --- a/cranelift/filetests/filetests/isa/x86/stack-load-store8.clif +++ b/cranelift/filetests/filetests/isa/x86/stack-load-store8.clif @@ -4,7 +4,7 @@ target x86_64 function u0:0(i8) -> i8 { ss0 = explicit_slot 1 -ebb0(v0: i8): +block0(v0: i8): stack_store v0, ss0 ; check: v2 = stack_addr.i64 ss0 ; nextln: v3 = uextend.i32 v0 diff --git a/cranelift/filetests/filetests/isa/x86/uextend-i8-to-i16.clif b/cranelift/filetests/filetests/isa/x86/uextend-i8-to-i16.clif index d92da90343..7d778aa778 100644 --- a/cranelift/filetests/filetests/isa/x86/uextend-i8-to-i16.clif +++ b/cranelift/filetests/filetests/isa/x86/uextend-i8-to-i16.clif @@ -2,13 +2,13 @@ test compile target x86_64 function u0:0(i8) -> i16 fast { -ebb0(v0: i8): +block0(v0: i8): v1 = uextend.i16 v0 return v1 } function u0:1(i8) -> i16 fast { -ebb0(v0: i8): +block0(v0: i8): v1 = sextend.i16 v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x86/vconst-binemit.clif b/cranelift/filetests/filetests/isa/x86/vconst-binemit.clif index 2d6f862679..15522e3d38 100644 --- a/cranelift/filetests/filetests/isa/x86/vconst-binemit.clif +++ b/cranelift/filetests/filetests/isa/x86/vconst-binemit.clif @@ -4,7 +4,7 @@ set enable_simd target x86_64 function %test_vconst_b8() { -ebb0: +block0: [-, %xmm2] v0 = vconst.b8x16 0x01 ; bin: 0f 10 15 00000008 PCRelRodata4(15) [-, %xmm3] v1 = vconst.b8x16 0x02 ; bin: 0f 10 1d 00000011 PCRelRodata4(31) return diff --git a/cranelift/filetests/filetests/isa/x86/vconst-opt-run.clif b/cranelift/filetests/filetests/isa/x86/vconst-opt-run.clif index 487ff4f844..9294614c96 100644 --- a/cranelift/filetests/filetests/isa/x86/vconst-opt-run.clif +++ b/cranelift/filetests/filetests/isa/x86/vconst-opt-run.clif @@ -5,7 +5,7 @@ target x86_64 ; TODO move to vconst-run.clif function %test_vconst_zeroes() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 0x00 v1 = extractlane v0, 4 v2 = icmp_imm eq v1, 0 @@ -14,7 +14,7 @@ ebb0: ; run function %test_vconst_ones() -> b1 { -ebb0: +block0: v0 = vconst.i8x16 0xffffffffffffffffffffffffffffffff v1 = extractlane v0, 2 v2 = icmp_imm eq v1, 0xff diff --git a/cranelift/filetests/filetests/isa/x86/vconst-opt.clif b/cranelift/filetests/filetests/isa/x86/vconst-opt.clif index 4daeed8abe..bc444b7784 100644 --- a/cranelift/filetests/filetests/isa/x86/vconst-opt.clif +++ b/cranelift/filetests/filetests/isa/x86/vconst-opt.clif @@ -5,7 +5,7 @@ target x86_64 ; TODO move to vconst-compile.clif or vconst-binemit.clif function %test_vconst_optimizations() { -ebb0: +block0: [-, %xmm4] v0 = vconst.b8x16 0x00 ; bin: 66 0f ef e4 [-, %xmm7] v1 = vconst.b8x16 0xffffffffffffffffffffffffffffffff ; bin: 66 0f 74 ff return diff --git a/cranelift/filetests/filetests/isa/x86/vconst-rodata.clif b/cranelift/filetests/filetests/isa/x86/vconst-rodata.clif index 34c203dce6..0df8493d5d 100644 --- a/cranelift/filetests/filetests/isa/x86/vconst-rodata.clif +++ b/cranelift/filetests/filetests/isa/x86/vconst-rodata.clif @@ -3,7 +3,7 @@ set enable_simd=true target x86_64 haswell function %test_vconst_i32() -> i32x4 { -ebb0: +block0: v0 = vconst.i32x4 0x1234 return v0 } @@ -11,7 +11,7 @@ ebb0: ; sameln: [34, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] function %test_vconst_b16() -> b16x8 { -ebb0: +block0: v0 = vconst.b16x8 [true false true false true false true true] return v0 } diff --git 
a/cranelift/filetests/filetests/isa/x86/vconst-run.clif b/cranelift/filetests/filetests/isa/x86/vconst-run.clif index 9ec160c2e5..60d94fbccd 100644 --- a/cranelift/filetests/filetests/isa/x86/vconst-run.clif +++ b/cranelift/filetests/filetests/isa/x86/vconst-run.clif @@ -2,7 +2,7 @@ test run set enable_simd function %test_vconst_syntax() -> b1 { -ebb0: +block0: v0 = vconst.i32x4 0x00000004_00000003_00000002_00000001 ; build constant using hexadecimal syntax v1 = vconst.i32x4 [1 2 3 4] ; build constant using literal list syntax diff --git a/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64.clif b/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64.clif index a621abfe9f..55a6c59bed 100644 --- a/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64.clif +++ b/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64.clif @@ -5,7 +5,7 @@ target x86_64 haswell ; check that we use the right register for one arg function %one_arg(i64) windows_fastcall { -ebb0(v0: i64): +block0(v0: i64): return } ; check: function %one_arg(i64 [%rcx], i64 fp [%rbp]) -> i64 fp [%rbp] windows_fastcall { @@ -13,33 +13,33 @@ ebb0(v0: i64): ; check that we still use registers for 4 arguments function %four_args(i64, i64, i64, i64) windows_fastcall { -ebb0(v0: i64, v1: i64, v2: i64, v3: i64): +block0(v0: i64, v1: i64, v2: i64, v3: i64): return } ; check: function %four_args(i64 [%rcx], i64 [%rdx], i64 [%r8], i64 [%r9], i64 fp [%rbp]) -> i64 fp [%rbp] windows_fastcall { ; check that float arguments are passed through XMM registers function %four_float_args(f64, f64, f64, f64) windows_fastcall { -ebb0(v0: f64, v1: f64, v2: f64, v3: f64): +block0(v0: f64, v1: f64, v2: f64, v3: f64): return } ; check: function %four_float_args(f64 [%xmm0], f64 [%xmm1], f64 [%xmm2], f64 [%xmm3], i64 fp [%rbp]) -> i64 fp [%rbp] windows_fastcall { ; check that we use stack space for > 4 arguments function %five_args(i64, i64, i64, i64, i64) windows_fastcall { -ebb0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64): +block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64): return } ; check: function %five_args(i64 [%rcx], i64 [%rdx], i64 [%r8], i64 [%r9], i64 [32], i64 fp [%rbp]) -> i64 fp [%rbp] windows_fastcall { function %mixed_int_float(i64, f64, i64, f32) windows_fastcall { -ebb0(v0: i64, v1: f64, v2: i64, v3: f32): +block0(v0: i64, v1: f64, v2: i64, v3: f32): return } ; check: function %mixed_int_float(i64 [%rcx], f64 [%xmm1], i64 [%r8], f32 [%xmm3], i64 fp [%rbp]) -> i64 fp [%rbp] windows_fastcall { function %ret_val_float(f32, f64, i64, i64) -> f64 windows_fastcall { -ebb0(v0: f32, v1: f64, v2: i64, v3: i64): +block0(v0: f32, v1: f64, v2: i64, v3: i64): return v1 } ; check: function %ret_val_float(f32 [%xmm0], f64 [%xmm1], i64 [%r8], i64 [%r9], i64 fp [%rbp]) -> f64 [%xmm0], i64 fp [%rbp] windows_fastcall { diff --git a/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64_unwind.clif b/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64_unwind.clif index 7dc024f33c..b146f0ac76 100644 --- a/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64_unwind.clif +++ b/cranelift/filetests/filetests/isa/x86/windows_fastcall_x64_unwind.clif @@ -5,14 +5,14 @@ target x86_64 haswell ; check that there is no unwind information for a system_v function function %not_fastcall() system_v { -ebb0: +block0: return } ; sameln: No unwind information.
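As context for the two unwind test files touched here: Windows structured unwind descriptors are emitted only for windows_fastcall functions, while system_v functions are instead described by DWARF CIE/FDE records, which windows_systemv_x64_fde.clif below checks. A minimal sketch of that split in the post-rename syntax (function names are hypothetical, not part of this patch):

; sketch only; function names are hypothetical
function %sysv() system_v {
block0:
    return  ; fastcall unwind test: "No unwind information."; FDE test: "0x00000000: CIE"
}

function %fastcall() windows_fastcall {
block0:
    return  ; fastcall unwind test: "UnwindInfo { ..."; FDE test: "No unwind information."
}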
; check the unwind information for a function with no args function %no_args() windows_fastcall { -ebb0: +block0: return } ; sameln: UnwindInfo { @@ -47,7 +47,7 @@ ebb0: ; check a function with medium-sized stack alloc function %medium_stack() windows_fastcall { ss0 = explicit_slot 100000 -ebb0: +block0: return } ; sameln: UnwindInfo { @@ -84,7 +84,7 @@ ebb0: ; check a function with large-sized stack alloc function %large_stack() windows_fastcall { ss0 = explicit_slot 524288 -ebb0: +block0: return } ; sameln: UnwindInfo { @@ -120,7 +120,7 @@ ebb0: ; check a function that has CSRs function %lots_of_registers(i64, i64) windows_fastcall { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = load.i32 v0+0 v3 = load.i32 v0+8 v4 = load.i32 v0+16 diff --git a/cranelift/filetests/filetests/isa/x86/windows_systemv_x64_fde.clif b/cranelift/filetests/filetests/isa/x86/windows_systemv_x64_fde.clif index 1444359de7..31b75b6c16 100644 --- a/cranelift/filetests/filetests/isa/x86/windows_systemv_x64_fde.clif +++ b/cranelift/filetests/filetests/isa/x86/windows_systemv_x64_fde.clif @@ -5,14 +5,14 @@ target x86_64 haswell ; check that there is no libunwind information for a windows_fastcall function function %not_fastcall() windows_fastcall { -ebb0: +block0: return } ; sameln: No unwind information. ; check the libunwind information for a function with no args function %no_args() system_v { -ebb0: +block0: return } ; sameln: 0x00000000: CIE diff --git a/cranelift/filetests/filetests/legalizer/bitrev-i128.clif b/cranelift/filetests/filetests/legalizer/bitrev-i128.clif index 5a539d0e89..fad0f2aace 100644 --- a/cranelift/filetests/filetests/legalizer/bitrev-i128.clif +++ b/cranelift/filetests/filetests/legalizer/bitrev-i128.clif @@ -2,12 +2,12 @@ test legalizer target x86_64 function %reverse_bits(i128) -> i128 { -ebb0(v0: i128): +block0(v0: i128): v1 = bitrev.i128 v0 return v1 } -; check: ebb0(v2: i64, v3: i64): +; check: block0(v2: i64, v3: i64): ; check: v0 = iconcat v2, v3 ; check: v33 = iconst.i64 0xaaaa_aaaa_aaaa_aaaa ; check: v6 = band v2, v33 diff --git a/cranelift/filetests/filetests/legalizer/bitrev.clif b/cranelift/filetests/filetests/legalizer/bitrev.clif index b7ee07735e..5651d7a7f3 100644 --- a/cranelift/filetests/filetests/legalizer/bitrev.clif +++ b/cranelift/filetests/filetests/legalizer/bitrev.clif @@ -2,7 +2,7 @@ test legalizer target x86_64 function %reverse_bits_8(i8) -> i8 { -ebb0(v0: i8): +block0(v0: i8): v1 = bitrev.i8 v0 return v1 } @@ -57,7 +57,7 @@ ebb0(v0: i8): ; check: return v1 function %reverse_bits_16(i16) -> i16 { -ebb0(v0: i16): +block0(v0: i16): v1 = bitrev.i16 v0 return v1 } @@ -128,7 +128,7 @@ ebb0(v0: i16): ; check: return v1 function %reverse_bits_32(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = bitrev.i32 v0 return v1 } @@ -162,7 +162,7 @@ ebb0(v0: i32): function %reverse_bits_64(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = bitrev.i64 v0 return v1 } diff --git a/cranelift/filetests/filetests/legalizer/br_table_cond.clif b/cranelift/filetests/filetests/legalizer/br_table_cond.clif index dc0bd6473c..9677e2c9f3 100644 --- a/cranelift/filetests/filetests/legalizer/br_table_cond.clif +++ b/cranelift/filetests/filetests/legalizer/br_table_cond.clif @@ -5,60 +5,60 @@ target x86_64 ; Test that when jump_tables_enabled is false, all jump tables are eliminated.
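Concretely (a sketch with hypothetical value and block names; the check lines below match the real expansion), a two-entry br_table is rewritten into a linear chain of icmp_imm/brnz tests, one per table entry, falling through to the default block:

; sketch only; value and block names are hypothetical
; before legalization, with jt0 = jump_table [block_a, block_b]:
;     br_table v0, block_default, jt0
; after legalization, approximately:
    v1 = icmp_imm eq v0, 0
    brnz v1, block_a
    jump fail0
fail0:
    v2 = icmp_imm eq v0, 1
    brnz v2, block_b
    jump block_default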
; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ function u0:0(i64 vmctx) baldrdash_system_v { gv0 = vmctx gv1 = iadd_imm.i64 gv0, 48 - jt0 = jump_table [ebb2, ebb2, ebb7] - jt1 = jump_table [ebb8, ebb8] + jt0 = jump_table [block2, block2, block7] + jt1 = jump_table [block8, block8] -ebb0(v0: i64): - jump ebb5 +block0(v0: i64): + jump block5 -ebb5: +block5: v1 = global_value.i64 gv1 v2 = load.i64 v1 trapnz v2, interrupt v3 = iconst.i32 0 - br_table v3, ebb3, jt0 -; check: ebb5: + br_table v3, block3, jt0 +; check: block5: ; check: $(val0=$V) = iconst.i32 0 ; nextln: $(cmp0=$V) = icmp_imm eq $val0, 0 -; nextln: brnz $cmp0, ebb2 -; nextln: jump $(fail0=$EBB) +; nextln: brnz $cmp0, block2 +; nextln: jump $(fail0=$BB) ; check: $fail0: ; nextln: $(cmp1=$V) = icmp_imm.i32 eq $val0, 1 -; nextln: brnz $cmp1, ebb2 -; nextln: jump $(fail1=$EBB) +; nextln: brnz $cmp1, block2 +; nextln: jump $(fail1=$BB) ; check: $fail1: ; nextln: $(cmp2=$V) = icmp_imm.i32 eq $val0, 2 -; nextln: brnz $cmp2, ebb7 -; nextln: jump ebb3 +; nextln: brnz $cmp2, block7 +; nextln: jump block3 -ebb7: +block7: v4 = iconst.i32 0 - br_table v4, ebb3, jt1 -; check: ebb7: + br_table v4, block3, jt1 +; check: block7: ; check: $(val1=$V) = iconst.i32 0 ; nextln: $(cmp3=$V) = icmp_imm eq $val1, 0 -; nextln: brnz $cmp3, ebb8 -; nextln: jump $(fail3=$EBB) +; nextln: brnz $cmp3, block8 +; nextln: jump $(fail3=$BB) ; check: $fail3: ; nextln: $(cmp4=$V) = icmp_imm.i32 eq $val1, 1 -; nextln: brnz $cmp4, ebb8 -; nextln: jump ebb3 +; nextln: brnz $cmp4, block8 +; nextln: jump block3 -ebb8: - jump ebb5 +block8: + jump block5 -ebb3: - jump ebb2 +block3: + jump block2 -ebb2: - jump ebb1 +block2: + jump block1 -ebb1: +block1: fallthrough_return } ; not: jump_table diff --git a/cranelift/filetests/filetests/legalizer/bxor_imm.clif b/cranelift/filetests/filetests/legalizer/bxor_imm.clif index 19372613ff..bf959a7364 100644 --- a/cranelift/filetests/filetests/legalizer/bxor_imm.clif +++ b/cranelift/filetests/filetests/legalizer/bxor_imm.clif @@ -2,7 +2,7 @@ test legalizer target x86_64 function %foo(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = bxor_imm.i64 v0, 0x100000000 return v2 } diff --git a/cranelift/filetests/filetests/legalizer/empty_br_table.clif b/cranelift/filetests/filetests/legalizer/empty_br_table.clif index b043733fba..606a07f605 100644 --- a/cranelift/filetests/filetests/legalizer/empty_br_table.clif +++ b/cranelift/filetests/filetests/legalizer/empty_br_table.clif @@ -6,12 +6,12 @@ target x86_64 function u0:0(i64) { jt0 = jump_table [] -ebb0(v0: i64): - br_table v0, ebb1, jt0 -; check: ebb0(v0: i64): -; nextln: jump ebb1 +block0(v0: i64): + br_table v0, block1, jt0 +; check: block0(v0: i64): +; nextln: jump block1 -ebb1: +block1: return } ; not: jump_table diff --git a/cranelift/filetests/filetests/legalizer/iconst-i64.clif b/cranelift/filetests/filetests/legalizer/iconst-i64.clif index a3c9168416..6aa7361b45 100644 --- a/cranelift/filetests/filetests/legalizer/iconst-i64.clif +++ b/cranelift/filetests/filetests/legalizer/iconst-i64.clif @@ -2,7 +2,7 @@ test legalizer target i686 function %foo() -> i64 { -ebb0: +block0: v1 = iconst.i64 0x6400000042 return v1 } diff --git a/cranelift/filetests/filetests/legalizer/isplit-bb.clif b/cranelift/filetests/filetests/legalizer/isplit-bb.clif index 10ab41c440..7e55eb1eb9 100644 --- a/cranelift/filetests/filetests/legalizer/isplit-bb.clif +++ b/cranelift/filetests/filetests/legalizer/isplit-bb.clif @@ -2,23 +2,23 @@ test legalizer target x86_64 function 
u0:0(i128, i128, i64) -> i128 system_v { -ebb0(v0: i128, v1: i128, v2: i64): - jump ebb1 +block0(v0: i128, v1: i128, v2: i64): + jump block1 -ebb1: +block1: v17 = iadd v0, v1 v20 = iadd v1, v17 - jump ebb79 + jump block79 -ebb79: +block79: v425 = iconst.i64 0 v426 = icmp_imm eq v425, 1 - brnz v426, ebb80 - jump ebb85(v20, v17) + brnz v426, block80 + jump block85(v20, v17) -ebb80: +block80: trap user0 -ebb85(v462: i128, v874: i128): +block85(v462: i128, v874: i128): trap user0 } diff --git a/cranelift/filetests/filetests/legalizer/popcnt-i128.clif b/cranelift/filetests/filetests/legalizer/popcnt-i128.clif index 0ecf7f74c5..f4919f4781 100644 --- a/cranelift/filetests/filetests/legalizer/popcnt-i128.clif +++ b/cranelift/filetests/filetests/legalizer/popcnt-i128.clif @@ -2,7 +2,7 @@ test legalizer target i686 function %foo() -> i128 { -ebb0: +block0: v1 = iconst.i64 0x6400000042 v2 = iconst.i64 0x7F10100042 v3 = iconcat v1, v2 diff --git a/cranelift/filetests/filetests/licm/basic.clif b/cranelift/filetests/filetests/licm/basic.clif index 3f5dfbbe14..b089d0b182 100644 --- a/cranelift/filetests/filetests/licm/basic.clif +++ b/cranelift/filetests/filetests/licm/basic.clif @@ -3,39 +3,39 @@ target riscv32 function %simple_loop(i32) -> i32 { -ebb0(v0: i32): - jump ebb1(v0) +block0(v0: i32): + jump block1(v0) -ebb1(v1: i32): +block1(v1: i32): v2 = iconst.i32 1 v3 = iconst.i32 2 v4 = iadd v2, v3 - brz v1, ebb3(v1) - jump ebb2 + brz v1, block3(v1) + jump block2 -ebb2: +block2: v5 = isub v1, v2 - jump ebb1(v5) + jump block1(v5) -ebb3(v6: i32): +block3(v6: i32): return v6 } ; sameln: function %simple_loop -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v2 = iconst.i32 1 ; nextln: v3 = iconst.i32 2 ; nextln: v4 = iadd v2, v3 -; nextln: jump ebb1(v0) +; nextln: jump block1(v0) ; nextln: -; nextln: ebb1(v1: i32): -; nextln: brz v1, ebb3(v1) -; nextln: jump ebb2 +; nextln: block1(v1: i32): +; nextln: brz v1, block3(v1) +; nextln: jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v5 = isub.i32 v1, v2 -; nextln: jump ebb1(v5) +; nextln: jump block1(v5) ; nextln: -; nextln: ebb3(v6: i32): +; nextln: block3(v6: i32): ; nextln: return v6 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/complex.clif b/cranelift/filetests/filetests/licm/complex.clif index 2774cde5b3..ab9c905e39 100644 --- a/cranelift/filetests/filetests/licm/complex.clif +++ b/cranelift/filetests/filetests/licm/complex.clif @@ -2,95 +2,95 @@ test licm target riscv32 function %complex(i32) -> i32 system_v { -ebb0(v0: i32): -[UJ#1b] jump ebb1(v0) +block0(v0: i32): +[UJ#1b] jump block1(v0) - ebb1(v1: i32): + block1(v1: i32): v2 = iconst.i32 1 v3 = iconst.i32 4 v4 = iadd v2, v1 -[SBzero#18] brz v1, ebb2(v2) -[UJ#1b] jump ebb4(v4) +[SBzero#18] brz v1, block2(v2) +[UJ#1b] jump block4(v4) - ebb2(v5: i32): + block2(v5: i32): v6 = iconst.i32 2 v7 = iadd v5, v4 v8 = iadd v6, v1 -[UJ#1b] jump ebb3(v8) +[UJ#1b] jump block3(v8) - ebb3(v9: i32): + block3(v9: i32): v10 = iadd v9, v5 v11 = iadd.i32 v1, v4 -[SBzero#18] brz.i32 v1, ebb2(v9) -[UJ#1b] jump ebb6(v10) +[SBzero#18] brz.i32 v1, block2(v9) +[UJ#1b] jump block6(v10) - ebb4(v12: i32): + block4(v12: i32): v13 = iconst.i32 3 v14 = iadd v12, v13 v15 = iadd.i32 v4, v13 -[UJ#1b] jump ebb5(v13) +[UJ#1b] jump block5(v13) - ebb5(v16: i32): + block5(v16: i32): v17 = iadd.i32 v14, v4 -[SBzero#18] brz.i32 v1, ebb4(v16) -[UJ#1b] jump ebb6(v16) +[SBzero#18] brz.i32 v1, block4(v16) +[UJ#1b] jump block6(v16) - ebb6(v18: i32): + block6(v18: i32): v19 = iadd v18, v2 v20 = 
iadd.i32 v2, v3 -[SBzero#18] brz.i32 v1, ebb1(v20) -[UJ#1b] jump ebb7 +[SBzero#18] brz.i32 v1, block1(v20) +[UJ#1b] jump block7 - ebb7: + block7: [Iret#19] return v19 } ; sameln: function %complex -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v2 = iconst.i32 1 ; nextln: v3 = iconst.i32 4 ; nextln: v6 = iconst.i32 2 ; nextln: v13 = iconst.i32 3 ; nextln: v20 = iadd v2, v3 -; nextln: jump ebb1(v0) +; nextln: jump block1(v0) ; nextln: -; nextln: ebb1(v1: i32): +; nextln: block1(v1: i32): ; nextln: v4 = iadd.i32 v2, v1 -; nextln: brz v1, ebb8(v2) -; nextln: jump ebb9(v4) +; nextln: brz v1, block8(v2) +; nextln: jump block9(v4) ; nextln: -; nextln: ebb8(v21: i32): +; nextln: block8(v21: i32): ; nextln: v8 = iadd.i32 v6, v1 ; nextln: v11 = iadd.i32 v1, v4 -; nextln: jump ebb2(v21) +; nextln: jump block2(v21) ; nextln: -; nextln: ebb2(v5: i32): +; nextln: block2(v5: i32): ; nextln: v7 = iadd v5, v4 -; nextln: jump ebb3(v8) +; nextln: jump block3(v8) ; nextln: -; nextln: ebb3(v9: i32): +; nextln: block3(v9: i32): ; nextln: v10 = iadd v9, v5 -; nextln: brz.i32 v1, ebb2(v9) -; nextln: jump ebb6(v10) +; nextln: brz.i32 v1, block2(v9) +; nextln: jump block6(v10) ; nextln: -; nextln: ebb9(v22: i32): +; nextln: block9(v22: i32): ; nextln: v15 = iadd.i32 v4, v13 -; nextln: jump ebb4(v22) +; nextln: jump block4(v22) ; nextln: -; nextln: ebb4(v12: i32): +; nextln: block4(v12: i32): ; nextln: v14 = iadd v12, v13 -; nextln: jump ebb5(v13) +; nextln: jump block5(v13) ; nextln: -; nextln: ebb5(v16: i32): +; nextln: block5(v16: i32): ; nextln: v17 = iadd.i32 v14, v4 -; nextln: brz.i32 v1, ebb4(v16) -; nextln: jump ebb6(v16) +; nextln: brz.i32 v1, block4(v16) +; nextln: jump block6(v16) ; nextln: -; nextln: ebb6(v18: i32): +; nextln: block6(v18: i32): ; nextln: v19 = iadd v18, v2 -; nextln: brz.i32 v1, ebb1(v20) -; nextln: jump ebb7 +; nextln: brz.i32 v1, block1(v20) +; nextln: jump block7 ; nextln: -; nextln: ebb7: +; nextln: block7: ; nextln: return v19 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/critical-edge.clif b/cranelift/filetests/filetests/licm/critical-edge.clif index 89beb387cc..1940a4ed36 100644 --- a/cranelift/filetests/filetests/licm/critical-edge.clif +++ b/cranelift/filetests/filetests/licm/critical-edge.clif @@ -5,50 +5,50 @@ target riscv32 function %critical_edge(i32, i32) -> i32 { - ebb0(v0: i32, v7: i32): -[SBzero#38] brnz v7, ebb2(v0) -[UJ#1b] jump ebb1 + block0(v0: i32, v7: i32): +[SBzero#38] brnz v7, block2(v0) +[UJ#1b] jump block1 - ebb1: + block1: [Iret#19] return v0 - ebb2(v1: i32): + block2(v1: i32): v2 = iconst.i32 1 v3 = iconst.i32 2 v4 = iadd v2, v3 -[SBzero#18] brz v1, ebb4(v1) -[UJ#1b] jump ebb3 +[SBzero#18] brz v1, block4(v1) +[UJ#1b] jump block3 - ebb3: + block3: v5 = isub v1, v2 -[UJ#1b] jump ebb2(v5) +[UJ#1b] jump block2(v5) - ebb4(v6: i32): + block4(v6: i32): [Iret#19] return v6 } ; sameln: function %critical_edge -; nextln: ebb0(v0: i32, v7: i32): -; nextln: brnz v7, ebb5(v0) -; nextln: jump ebb1 +; nextln: block0(v0: i32, v7: i32): +; nextln: brnz v7, block5(v0) +; nextln: jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: return v0 ; nextln: -; nextln: ebb5(v8: i32): +; nextln: block5(v8: i32): ; nextln: v2 = iconst.i32 1 ; nextln: v3 = iconst.i32 2 ; nextln: v4 = iadd v2, v3 -; nextln: jump ebb2(v8) +; nextln: jump block2(v8) ; nextln: -; nextln: ebb2(v1: i32): -; nextln: brz v1, ebb4(v1) -; nextln: jump ebb3 +; nextln: block2(v1: i32): +; nextln: brz v1, block4(v1) +; nextln: jump block3 ; nextln: -; nextln: ebb3: +; 
nextln: block3: ; nextln: v5 = isub.i32 v1, v2 -; nextln: jump ebb2(v5) +; nextln: jump block2(v5) ; nextln: -; nextln: ebb4(v6: i32): +; nextln: block4(v6: i32): ; nextln: return v6 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/encoding.clif b/cranelift/filetests/filetests/licm/encoding.clif index b029a51c18..2b0114d2d0 100644 --- a/cranelift/filetests/filetests/licm/encoding.clif +++ b/cranelift/filetests/filetests/licm/encoding.clif @@ -4,39 +4,39 @@ target riscv32 ; Ensure that instructions emitted by LICM get encodings. function %simple_loop(i32) -> i32 { - ebb0(v0: i32): -[UJ#1b] jump ebb1(v0) + block0(v0: i32): +[UJ#1b] jump block1(v0) - ebb1(v1: i32): + block1(v1: i32): [Iz#04,%x0] v2 = iconst.i32 1 [Iz#04,%x1] v3 = iconst.i32 2 [R#0c,%x2] v4 = iadd v2, v3 -[SBzero#18] brz v1, ebb3(v1) -[UJ#1b] jump ebb2 +[SBzero#18] brz v1, block3(v1) +[UJ#1b] jump block2 - ebb2: + block2: [R#200c,%x5] v5 = isub v1, v2 -[UJ#1b] jump ebb1(v5) +[UJ#1b] jump block1(v5) - ebb3(v6: i32): + block3(v6: i32): [Iret#19] return v6 } ; check: function -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: [Iz#04,%x0] v2 = iconst.i32 1 ; nextln: [Iz#04,%x1] v3 = iconst.i32 2 ; nextln: [R#0c,%x2] v4 = iadd v2, v3 -; nextln: [UJ#1b] jump ebb1(v0) +; nextln: [UJ#1b] jump block1(v0) ; nextln: -; nextln: ebb1(v1: i32): -; nextln: [SBzero#18] brz v1, ebb3(v1) -; nextln: [UJ#1b] jump ebb2 +; nextln: block1(v1: i32): +; nextln: [SBzero#18] brz v1, block3(v1) +; nextln: [UJ#1b] jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: [R#200c,%x5] v5 = isub.i32 v1, v2 -; nextln: [UJ#1b] jump ebb1(v5) +; nextln: [UJ#1b] jump block1(v5) ; nextln: -; nextln: ebb3(v6: i32): +; nextln: block3(v6: i32): ; nextln: [Iret#19] return v6 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/jump-table-entry.clif b/cranelift/filetests/filetests/licm/jump-table-entry.clif index cbf51cd080..6f754185a5 100644 --- a/cranelift/filetests/filetests/licm/jump-table-entry.clif +++ b/cranelift/filetests/filetests/licm/jump-table-entry.clif @@ -2,32 +2,32 @@ test licm target x86_64 function %dont_hoist_jump_table_entry_during_licm() { - jt0 = jump_table [ebb1, ebb1] + jt0 = jump_table [block1, block1] -ebb0: - fallthrough ebb1 +block0: + fallthrough block1 -ebb1: ; the loop! +block1: ; the loop! 
v2 = iconst.i32 42 v3 = ifcmp_imm v2, 0 - brif uge v3, ebb1 - fallthrough ebb2 + brif uge v3, block1 + fallthrough block2 -ebb2: +block2: v1 = iconst.i64 -14 v8 = ifcmp_imm v1, 2 - brif uge v8, ebb1 - jump ebb3 + brif uge v8, block1 + jump block3 -ebb3: +block3: v5 = jump_table_base.i64 jt0 v6 = jump_table_entry.i64 v1, v5, 4, jt0 v7 = iadd v5, v6 indirect_jump_table_br v7, jt0 -; check: ebb2: +; check: block2: ; nextln: v8 = ifcmp_imm.i64 v1, 2 -; nextln: brif uge v8, ebb1 -; nextln: jump ebb3 -; check: ebb3: +; nextln: brif uge v8, block1 +; nextln: jump block3 +; check: block3: ; nextln: jump_table_entry.i64 } diff --git a/cranelift/filetests/filetests/licm/load_readonly_notrap.clif b/cranelift/filetests/filetests/licm/load_readonly_notrap.clif index 4731bd664e..f663646b9e 100644 --- a/cranelift/filetests/filetests/licm/load_readonly_notrap.clif +++ b/cranelift/filetests/filetests/licm/load_readonly_notrap.clif @@ -10,22 +10,22 @@ function %hoist_load(i32, i64 vmctx) -> i32 { gv1 = load.i64 notrap aligned readonly gv0 heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32 -ebb0(v0: i32, v1: i64): - jump ebb1(v0, v1) +block0(v0: i32, v1: i64): + jump block1(v0, v1) -ebb1(v2: i32, v3: i64): +block1(v2: i32, v3: i64): v4 = iconst.i32 1 v5 = heap_addr.i64 heap0, v4, 1 v6 = load.i32 notrap aligned readonly v5 v7 = iadd v2, v6 - brz v2, ebb3(v2) - jump ebb2 + brz v2, block3(v2) + jump block2 -ebb2: +block2: v8 = isub v2, v4 - jump ebb1(v8, v3) + jump block1(v8, v3) -ebb3(v9: i32): +block3(v9: i32): return v9 } @@ -34,21 +34,21 @@ ebb3(v9: i32): ; nextln: gv1 = load.i64 notrap aligned readonly gv0 ; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32 ; nextln: -; nextln: ebb0(v0: i32, v1: i64): +; nextln: block0(v0: i32, v1: i64): ; nextln: v4 = iconst.i32 1 ; nextln: v5 = heap_addr.i64 heap0, v4, 1 ; nextln: v6 = load.i32 notrap aligned readonly v5 -; nextln: jump ebb1(v0, v1) +; nextln: jump block1(v0, v1) ; nextln: -; nextln: ebb1(v2: i32, v3: i64): +; nextln: block1(v2: i32, v3: i64): ; nextln: v7 = iadd v2, v6 -; nextln: brz v2, ebb3(v2) -; nextln: jump ebb2 +; nextln: brz v2, block3(v2) +; nextln: jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v8 = isub.i32 v2, v4 -; nextln: jump ebb1(v8, v3) +; nextln: jump block1(v8, v3) ; nextln: -; nextln: ebb3(v9: i32): +; nextln: block3(v9: i32): ; nextln: return v9 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/multiple-blocks.clif b/cranelift/filetests/filetests/licm/multiple-blocks.clif index ea23505ef5..04cfb9d621 100644 --- a/cranelift/filetests/filetests/licm/multiple-blocks.clif +++ b/cranelift/filetests/filetests/licm/multiple-blocks.clif @@ -3,57 +3,57 @@ target riscv32 function %multiple_blocks(i32) -> i32 { -ebb0(v0: i32): - jump ebb1(v0) +block0(v0: i32): + jump block1(v0) -ebb1(v10: i32): +block1(v10: i32): v11 = iconst.i32 1 v12 = iconst.i32 2 v13 = iadd v11, v12 - brz v10, ebb4(v10) - jump ebb2 + brz v10, block4(v10) + jump block2 -ebb2: +block2: v15 = isub v10, v11 - brz v15, ebb5(v15) - jump ebb3 + brz v15, block5(v15) + jump block3 -ebb3: +block3: v14 = isub v10, v11 - jump ebb1(v14) + jump block1(v14) -ebb4(v20: i32): +block4(v20: i32): return v20 -ebb5(v30: i32): +block5(v30: i32): v31 = iadd v11, v13 - jump ebb1(v30) + jump block1(v30) } ; sameln:function %multiple_blocks(i32) -> i32 { -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v11 = iconst.i32 1 ; nextln: v12 = iconst.i32 2 ; 
nextln: v13 = iadd v11, v12 ; nextln: v31 = iadd v11, v13 -; nextln: jump ebb1(v0) +; nextln: jump block1(v0) ; nextln: -; nextln: ebb1(v10: i32): -; nextln: brz v10, ebb4(v10) -; nextln: jump ebb2 +; nextln: block1(v10: i32): +; nextln: brz v10, block4(v10) +; nextln: jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v15 = isub.i32 v10, v11 -; nextln: brz v15, ebb5(v15) -; nextln: jump ebb3 +; nextln: brz v15, block5(v15) +; nextln: jump block3 ; nextln: -; nextln: ebb3: +; nextln: block3: ; nextln: v14 = isub.i32 v10, v11 -; nextln: jump ebb1(v14) +; nextln: jump block1(v14) ; nextln: -; nextln: ebb4(v20: i32): +; nextln: block4(v20: i32): ; nextln: return v20 ; nextln: -; nextln: ebb5(v30: i32): -; nextln: jump ebb1(v30) +; nextln: block5(v30: i32): +; nextln: jump block1(v30) ; nextln: } diff --git a/cranelift/filetests/filetests/licm/nested_loops.clif b/cranelift/filetests/filetests/licm/nested_loops.clif index 423b24d33f..7f9cb928db 100644 --- a/cranelift/filetests/filetests/licm/nested_loops.clif +++ b/cranelift/filetests/filetests/licm/nested_loops.clif @@ -3,60 +3,60 @@ target riscv32 function %nested_loops(i32) -> i32 { -ebb0(v0: i32): - jump ebb1(v0) +block0(v0: i32): + jump block1(v0) -ebb1(v1: i32): +block1(v1: i32): v2 = iconst.i32 1 v3 = iconst.i32 2 v4 = iadd v2, v3 v5 = isub v1, v2 - jump ebb2(v5, v5) + jump block2(v5, v5) -ebb2(v10: i32, v11: i32): - brz v11, ebb4(v10) - jump ebb3 +block2(v10: i32, v11: i32): + brz v11, block4(v10) + jump block3 -ebb3: +block3: v12 = iconst.i32 1 v15 = iadd v12, v5 v13 = isub v11, v12 - jump ebb2(v10,v13) + jump block2(v10,v13) -ebb4(v20: i32): - brz v20, ebb5(v20) - jump ebb1(v20) +block4(v20: i32): + brz v20, block5(v20) + jump block1(v20) -ebb5(v30: i32): +block5(v30: i32): return v30 } ; sameln:function %nested_loops(i32) -> i32 { -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v2 = iconst.i32 1 ; nextln: v3 = iconst.i32 2 ; nextln: v4 = iadd v2, v3 ; nextln: v12 = iconst.i32 1 -; nextln: jump ebb1(v0) +; nextln: jump block1(v0) ; nextln: -; nextln: ebb1(v1: i32): +; nextln: block1(v1: i32): ; nextln: v5 = isub v1, v2 ; nextln: v15 = iadd.i32 v12, v5 -; nextln: jump ebb2(v5, v5) +; nextln: jump block2(v5, v5) ; nextln: -; nextln: ebb2(v10: i32, v11: i32): -; nextln: brz v11, ebb4(v10) -; nextln: jump ebb3 +; nextln: block2(v10: i32, v11: i32): +; nextln: brz v11, block4(v10) +; nextln: jump block3 ; nextln: -; nextln: ebb3: +; nextln: block3: ; nextln: v13 = isub.i32 v11, v12 -; nextln: jump ebb2(v10, v13) +; nextln: jump block2(v10, v13) ; nextln: -; nextln: ebb4(v20: i32): -; nextln: brz v20, ebb5(v20) -; nextln: jump ebb1(v20) +; nextln: block4(v20: i32): +; nextln: brz v20, block5(v20) +; nextln: jump block1(v20) ; nextln: -; nextln: ebb5(v30: i32): +; nextln: block5(v30: i32): ; nextln: return v30 ; nextln: } diff --git a/cranelift/filetests/filetests/licm/reject.clif b/cranelift/filetests/filetests/licm/reject.clif index 43823c1295..eab03760b6 100644 --- a/cranelift/filetests/filetests/licm/reject.clif +++ b/cranelift/filetests/filetests/licm/reject.clif @@ -3,92 +3,92 @@ target riscv32 function %other_side_effects(i32) -> i32 { -ebb0(v0: i32): - jump ebb1(v0) +block0(v0: i32): + jump block1(v0) -ebb1(v1: i32): +block1(v1: i32): regmove.i32 v0, %x10 -> %x20 -; check: ebb1(v1: i32): +; check: block1(v1: i32): ; check: regmove.i32 v0, %x10 -> %x20 v2 = iconst.i32 1 - brz v1, ebb3(v1) - jump ebb2 + brz v1, block3(v1) + jump block2 -ebb2: +block2: v5 = isub v1, v2 - jump ebb1(v5) + jump block1(v5) 
-ebb3(v6: i32): +block3(v6: i32): return v6 } function %cpu_flags(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): - jump ebb1(v0, v1) +block0(v0: i32, v1: i32): + jump block1(v0, v1) -ebb1(v2: i32, v3: i32): +block1(v2: i32, v3: i32): v4 = ifcmp.i32 v0, v1 v5 = selectif.i32 eq v4, v2, v3 -; check: ebb1(v2: i32, v3: i32): +; check: block1(v2: i32, v3: i32): ; check: ifcmp.i32 v0, v1 ; check: v5 = selectif.i32 eq v4, v2, v3 v8 = iconst.i32 1 - brz v1, ebb3(v1) - jump ebb2 + brz v1, block3(v1) + jump block2 -ebb2: +block2: v9 = isub v1, v8 v10 = iadd v1, v8 - jump ebb1(v9, v10) + jump block1(v9, v10) -ebb3(v6: i32): +block3(v6: i32): return v6 } function %spill(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = spill.i32 v0 - jump ebb1(v0, v1) + jump block1(v0, v1) -ebb1(v3: i32, v4: i32): +block1(v3: i32, v4: i32): v5 = spill.i32 v1 v6 = fill.i32 v2 v7 = fill.i32 v5 -; check: ebb1(v3: i32, v4: i32): +; check: block1(v3: i32, v4: i32): ; check: v5 = spill.i32 v1 ; check: v6 = fill.i32 v2 ; check: v7 = fill v5 - brz v1, ebb3(v1) - jump ebb2 + brz v1, block3(v1) + jump block2 -ebb2: +block2: v9 = isub v1, v4 - jump ebb1(v9, v3) + jump block1(v9, v3) -ebb3(v10: i32): +block3(v10: i32): return v10 } function %non_invariant_aliases(i32) -> i32 { -ebb0(v0: i32): - jump ebb1(v0) +block0(v0: i32): + jump block1(v0) -ebb1(v1: i32): +block1(v1: i32): v8 -> v1 v9 -> v1 v2 = iadd v8, v9 -; check: ebb1(v1: i32): +; check: block1(v1: i32): ; check: v2 = iadd v8, v9 - brz v1, ebb3(v1) - jump ebb2 + brz v1, block3(v1) + jump block2 -ebb2: +block2: v5 = isub v1, v2 - jump ebb1(v5) + jump block1(v5) -ebb3(v6: i32): +block3(v6: i32): return v6 } diff --git a/cranelift/filetests/filetests/licm/reject_load_notrap.clif b/cranelift/filetests/filetests/licm/reject_load_notrap.clif index 71385807e3..58f046357d 100644 --- a/cranelift/filetests/filetests/licm/reject_load_notrap.clif +++ b/cranelift/filetests/filetests/licm/reject_load_notrap.clif @@ -11,22 +11,22 @@ function %hoist_load(i32, i64 vmctx) -> i32 { gv1 = load.i64 notrap aligned readonly gv0 heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v4 = iconst.i32 1 v5 = heap_addr.i64 heap0, v4, 1 - jump ebb1(v0, v1) + jump block1(v0, v1) -ebb1(v2: i32, v3: i64): +block1(v2: i32, v3: i64): v6 = load.i32 notrap aligned v5 v7 = iadd v2, v6 - brz v2, ebb3(v2) - jump ebb2 + brz v2, block3(v2) + jump block2 -ebb2: +block2: v8 = isub v2, v4 - jump ebb1(v8, v3) + jump block1(v8, v3) -ebb3(v9: i32): +block3(v9: i32): return v9 } @@ -35,21 +35,21 @@ ebb3(v9: i32): ; nextln: gv1 = load.i64 notrap aligned readonly gv0 ; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32 ; nextln: -; nextln: ebb0(v0: i32, v1: i64): +; nextln: block0(v0: i32, v1: i64): ; nextln: v4 = iconst.i32 1 ; nextln: v5 = heap_addr.i64 heap0, v4, 1 -; nextln: jump ebb1(v0, v1) +; nextln: jump block1(v0, v1) ; nextln: -; nextln: ebb1(v2: i32, v3: i64): +; nextln: block1(v2: i32, v3: i64): ; nextln: v6 = load.i32 notrap aligned v5 ; nextln: v7 = iadd v2, v6 -; nextln: brz v2, ebb3(v2) -; nextln: jump ebb2 +; nextln: brz v2, block3(v2) +; nextln: jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v8 = isub.i32 v2, v4 -; nextln: jump ebb1(v8, v3) +; nextln: jump block1(v8, v3) ; nextln: -; nextln: ebb3(v9: i32): +; nextln: block3(v9: i32): ; nextln: return v9 ; nextln: } diff --git 
a/cranelift/filetests/filetests/licm/reject_load_readonly.clif b/cranelift/filetests/filetests/licm/reject_load_readonly.clif index ea7b72469e..f794bad6b0 100644 --- a/cranelift/filetests/filetests/licm/reject_load_readonly.clif +++ b/cranelift/filetests/filetests/licm/reject_load_readonly.clif @@ -11,22 +11,22 @@ function %hoist_load(i32, i64 vmctx) -> i32 { gv1 = load.i64 notrap aligned readonly gv0 heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32 -ebb0(v0: i32, v1: i64): - jump ebb1(v0, v1) +block0(v0: i32, v1: i64): + jump block1(v0, v1) -ebb1(v2: i32, v3: i64): +block1(v2: i32, v3: i64): v4 = iconst.i32 1 v5 = heap_addr.i64 heap0, v4, 1 v6 = load.i32 aligned readonly v5 v7 = iadd v2, v6 - brz v2, ebb3(v2) - jump ebb2 + brz v2, block3(v2) + jump block2 -ebb2: +block2: v8 = isub v2, v4 - jump ebb1(v8, v3) + jump block1(v8, v3) -ebb3(v9: i32): +block3(v9: i32): return v9 } @@ -35,21 +35,21 @@ ebb3(v9: i32): ; nextln: gv1 = load.i64 notrap aligned readonly gv0 ; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32 ; nextln: -; nextln: ebb0(v0: i32, v1: i64): +; nextln: block0(v0: i32, v1: i64): ; nextln: v4 = iconst.i32 1 ; nextln: v5 = heap_addr.i64 heap0, v4, 1 -; nextln: jump ebb1(v0, v1) +; nextln: jump block1(v0, v1) ; nextln: -; nextln: ebb1(v2: i32, v3: i64): +; nextln: block1(v2: i32, v3: i64): ; nextln: v6 = load.i32 aligned readonly v5 ; nextln: v7 = iadd v2, v6 -; nextln: brz v2, ebb3(v2) -; nextln: jump ebb2 +; nextln: brz v2, block3(v2) +; nextln: jump block2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v8 = isub.i32 v2, v4 -; nextln: jump ebb1(v8, v3) +; nextln: jump block1(v8, v3) ; nextln: -; nextln: ebb3(v9: i32): +; nextln: block3(v9: i32): ; nextln: return v9 ; nextln: } diff --git a/cranelift/filetests/filetests/parser/alias.clif b/cranelift/filetests/filetests/parser/alias.clif index 4e253bdb03..6197ae35d1 100644 --- a/cranelift/filetests/filetests/parser/alias.clif +++ b/cranelift/filetests/filetests/parser/alias.clif @@ -2,7 +2,7 @@ test cat test verifier function %basic(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 -> v0 v3 -> v1 v4 = iadd.i32 v2, v3 @@ -10,7 +10,7 @@ ebb0(v0: i32, v1: i32): } function %transitive() -> i32 { -ebb0: +block0: v0 = iconst.i32 0 v1 -> v0 v2 -> v1 @@ -20,7 +20,7 @@ ebb0: } function %duplicate(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 -> v0 v2 -> v0 v2 -> v0 diff --git a/cranelift/filetests/filetests/parser/branch.clif b/cranelift/filetests/filetests/parser/branch.clif index 4404feba14..c9a71312d9 100644 --- a/cranelift/filetests/filetests/parser/branch.clif +++ b/cranelift/filetests/filetests/parser/branch.clif @@ -3,114 +3,114 @@ test cat ; Jumps with no arguments. The '()' empty argument list is optional. function %minimal() { -ebb0: - jump ebb1 +block0: + jump block1 -ebb1: - jump ebb0() +block1: + jump block0() } ; sameln: function %minimal() fast { -; nextln: ebb0: -; nextln: jump ebb1 +; nextln: block0: +; nextln: jump block1 ; nextln: -; nextln: ebb1: -; nextln: jump ebb0 +; nextln: block1: +; nextln: jump block0 ; nextln: } ; Jumps with 1 arg. 
function %onearg(i32) { -ebb0(v90: i32): - jump ebb1(v90) +block0(v90: i32): + jump block1(v90) -ebb1(v91: i32): - jump ebb0(v91) +block1(v91: i32): + jump block0(v91) } ; sameln: function %onearg(i32) fast { -; nextln: ebb0(v90: i32): -; nextln: jump ebb1(v90) +; nextln: block0(v90: i32): +; nextln: jump block1(v90) ; nextln: -; nextln: ebb1(v91: i32): -; nextln: jump ebb0(v91) +; nextln: block1(v91: i32): +; nextln: jump block0(v91) ; nextln: } ; Jumps with 2 args. function %twoargs(i32, f32) { -ebb0(v90: i32, v91: f32): - jump ebb1(v90, v91) +block0(v90: i32, v91: f32): + jump block1(v90, v91) -ebb1(v92: i32, v93: f32): - jump ebb0(v92, v93) +block1(v92: i32, v93: f32): + jump block0(v92, v93) } ; sameln: function %twoargs(i32, f32) fast { -; nextln: ebb0(v90: i32, v91: f32): -; nextln: jump ebb1(v90, v91) +; nextln: block0(v90: i32, v91: f32): +; nextln: jump block1(v90, v91) ; nextln: -; nextln: ebb1(v92: i32, v93: f32): -; nextln: jump ebb0(v92, v93) +; nextln: block1(v92: i32, v93: f32): +; nextln: jump block0(v92, v93) ; nextln: } ; Branches with no arguments. The '()' empty argument list is optional. function %minimal(i32) { -ebb0(v90: i32): - brz v90, ebb1 +block0(v90: i32): + brz v90, block1 -ebb1: - brnz v90, ebb1() +block1: + brnz v90, block1() } ; sameln: function %minimal(i32) fast { -; nextln: ebb0(v90: i32): -; nextln: brz v90, ebb1 +; nextln: block0(v90: i32): +; nextln: brz v90, block1 ; nextln: -; nextln: ebb1: -; nextln: brnz.i32 v90, ebb1 +; nextln: block1: +; nextln: brnz.i32 v90, block1 ; nextln: } function %twoargs(i32, f32) { -ebb0(v90: i32, v91: f32): - brz v90, ebb1(v90, v91) +block0(v90: i32, v91: f32): + brz v90, block1(v90, v91) -ebb1(v92: i32, v93: f32): - brnz v90, ebb0(v92, v93) +block1(v92: i32, v93: f32): + brnz v90, block0(v92, v93) } ; sameln: function %twoargs(i32, f32) fast { -; nextln: ebb0(v90: i32, v91: f32): -; nextln: brz v90, ebb1(v90, v91) +; nextln: block0(v90: i32, v91: f32): +; nextln: brz v90, block1(v90, v91) ; nextln: -; nextln: ebb1(v92: i32, v93: f32): -; nextln: brnz.i32 v90, ebb0(v92, v93) +; nextln: block1(v92: i32, v93: f32): +; nextln: brnz.i32 v90, block0(v92, v93) ; nextln: } function %jumptable(i32) { jt200 = jump_table [] - jt2 = jump_table [ebb10, ebb40, ebb20, ebb30] + jt2 = jump_table [block10, block40, block20, block30] -ebb10(v3: i32): - br_table v3, ebb50, jt2 +block10(v3: i32): + br_table v3, block50, jt2 -ebb20: +block20: trap user2 -ebb30: +block30: trap user3 -ebb40: +block40: trap user4 -ebb50: +block50: trap user1 } ; sameln: function %jumptable(i32) fast { -; check: jt2 = jump_table [ebb10, ebb40, ebb20, ebb30] +; check: jt2 = jump_table [block10, block40, block20, block30] ; check: jt200 = jump_table [] -; check: ebb10(v3: i32): -; nextln: br_table v3, ebb50, jt2 +; check: block10(v3: i32): +; nextln: br_table v3, block50, jt2 ; nextln: -; nextln: ebb20: +; nextln: block20: ; nextln: trap user2 ; nextln: -; nextln: ebb30: +; nextln: block30: ; nextln: trap user3 ; nextln: -; nextln: ebb40: +; nextln: block40: ; nextln: trap user4 ; nextln: -; nextln: ebb50: +; nextln: block50: ; nextln: trap user1 ; nextln: } diff --git a/cranelift/filetests/filetests/parser/call.clif b/cranelift/filetests/filetests/parser/call.clif index 28e3011f33..35e43822d1 100644 --- a/cranelift/filetests/filetests/parser/call.clif +++ b/cranelift/filetests/filetests/parser/call.clif @@ -2,22 +2,22 @@ test cat function %mini() { -ebb1: +block1: return } ; sameln: function %mini() fast { -; nextln: ebb1: +; nextln: block1: ; nextln: return ; 
nextln: } function %r1() -> i32, f32 baldrdash_system_v { -ebb1: +block1: v1 = iconst.i32 3 v2 = f32const 0.0 return v1, v2 } ; sameln: function %r1() -> i32, f32 baldrdash_system_v { -; nextln: ebb1: +; nextln: block1: ; nextln: v1 = iconst.i32 3 ; nextln: v2 = f32const 0.0 ; nextln: return v1, v2 @@ -43,7 +43,7 @@ function %direct() { fn1 = %one() -> i32 fn2 = %two() -> i32, f32 -ebb0: +block0: call fn0() v1 = call fn1() v2, v3 = call fn2() @@ -59,7 +59,7 @@ function %indirect(i64) { sig1 = () -> i32 sig2 = () -> i32, f32 -ebb0(v0: i64): +block0(v0: i64): v1 = call_indirect sig1, v0() call_indirect sig0, v1(v0) v3, v4 = call_indirect sig2, v1() @@ -74,7 +74,7 @@ function %long_call() { sig0 = () fn0 = %none sig0 -ebb0: +block0: v0 = func_addr.i32 fn0 call_indirect sig0, v0() return @@ -85,10 +85,10 @@ ebb0: ; Special purpose function arguments function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret { -ebb0(v1: i32, v2: i32, v3: i32, v4: i32): +block0(v1: i32, v2: i32, v3: i32, v4: i32): return v4, v2, v3, v1 } ; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret fast { -; check: ebb0(v1: i32, v2: i32, v3: i32, v4: i32): +; check: block0(v1: i32, v2: i32, v3: i32, v4: i32): ; check: return v4, v2, v3, v1 ; check: } diff --git a/cranelift/filetests/filetests/parser/flags.clif b/cranelift/filetests/filetests/parser/flags.clif index aac8017e85..c8d6e78912 100644 --- a/cranelift/filetests/filetests/parser/flags.clif +++ b/cranelift/filetests/filetests/parser/flags.clif @@ -2,63 +2,63 @@ test cat test verifier function %iflags(i32) { -ebb200(v0: i32): +block200(v0: i32): v1 = ifcmp_imm v0, 17 - brif eq v1, ebb201 - jump ebb400 + brif eq v1, block201 + jump block400 -ebb400: - brif ugt v1, ebb202 - jump ebb401 +block400: + brif ugt v1, block202 + jump block401 -ebb401: +block401: v2 = iconst.i32 34 v3 = ifcmp v0, v2 v4 = trueif eq v3 - brnz v4, ebb202 - jump ebb402 + brnz v4, block202 + jump block402 -ebb402: +block402: return -ebb201: +block201: return -ebb202: +block202: trap oob } ; check: v1 = ifcmp_imm v0, 17 -; check: brif eq v1, ebb201 -; check: brif ugt v1, ebb202 +; check: brif eq v1, block201 +; check: brif ugt v1, block202 ; check: v3 = ifcmp.i32 v0, v2 ; check: v4 = trueif eq v3 function %fflags(f32) { -ebb200(v0: f32): +block200(v0: f32): v1 = f32const 0x34.0p0 v2 = ffcmp v0, v1 - brff eq v2, ebb201 - jump ebb400 + brff eq v2, block201 + jump block400 -ebb400: - brff ord v2, ebb202 - jump ebb401 +block400: + brff ord v2, block202 + jump block401 -ebb401: +block401: v3 = trueff gt v2 - brnz v3, ebb202 - jump ebb402 + brnz v3, block202 + jump block402 -ebb402: +block402: return -ebb201: +block201: return -ebb202: +block202: trap oob } ; check: v2 = ffcmp v0, v1 -; check: brff eq v2, ebb201 -; check: brff ord v2, ebb202 +; check: brff eq v2, block201 +; check: brff ord v2, block202 ; check: v3 = trueff gt v2 diff --git a/cranelift/filetests/filetests/parser/instruction_encoding.clif b/cranelift/filetests/filetests/parser/instruction_encoding.clif index 5f7ae26af3..5386808482 100644 --- a/cranelift/filetests/filetests/parser/instruction_encoding.clif +++ b/cranelift/filetests/filetests/parser/instruction_encoding.clif @@ -5,7 +5,7 @@ target riscv32 ; regex: WS=[ \t]* function %foo(i32, i32) { -ebb1(v0: i32 [%x8], v1: i32): +block1(v0: i32 [%x8], v1: i32): [-,-] v2 = iadd v0, v1 [-] trap heap_oob [R#1234, %x5, %x11] v6, v7 = iadd_ifcout v2, v0 @@ -14,7 +14,7 @@ ebb1(v0: i32 [%x8], v1: i32): @a5 
[Iret#5] return v0, v8 } ; sameln: function %foo(i32, i32) fast { -; nextln: ebb1(v0: i32 [%x8], v1: i32): +; nextln: block1(v0: i32 [%x8], v1: i32): ; nextln: [-,-]$WS v2 = iadd v0, v1 ; nextln: [-]$WS trap heap_oob ; nextln: [R#1234,%x5,%x11]$WS v6, v7 = iadd_ifcout v2, v0 diff --git a/cranelift/filetests/filetests/parser/memory.clif b/cranelift/filetests/filetests/parser/memory.clif index 4e763f2b4d..ecf872d64f 100644 --- a/cranelift/filetests/filetests/parser/memory.clif +++ b/cranelift/filetests/filetests/parser/memory.clif @@ -4,7 +4,7 @@ test verifier function %vmglobal(i64 vmctx) -> i32 { gv3 = vmctx ; check: gv3 = vmctx -ebb0(v0: i64): +block0(v0: i64): v1 = global_value.i32 gv3 ; check: v1 = global_value.i32 gv3 return v1 @@ -17,7 +17,7 @@ function %load_and_add_imm(i64 vmctx) -> i32 { ; check: gv2 = vmctx ; check: gv3 = load.i32 notrap aligned gv2-72 ; check: gv4 = iadd_imm.i32 gv3, -32 -ebb0(v0: i64): +block0(v0: i64): v1 = global_value.i32 gv4 ; check: v1 = global_value.i32 gv4 return v1 @@ -31,7 +31,7 @@ function %backref(i64 vmctx) -> i32 { ; check: gv1 = load.i32 notrap aligned gv2 gv2 = vmctx ; check: gv2 = vmctx -ebb0(v0: i64): +block0(v0: i64): v1 = global_value.i32 gv1 return v1 } @@ -41,7 +41,7 @@ function %symbol() -> i32 { ; check: gv0 = symbol %something gv1 = symbol u8:9 ; check: gv1 = symbol u8:9 -ebb0: +block0: v0 = global_value.i32 gv0 ; check: v0 = global_value.i32 gv0 v1 = global_value.i32 gv1 @@ -59,7 +59,7 @@ function %sheap(i32, i64 vmctx) -> i64 { ; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 ; check: heap2 = static gv5, min 0, bound 0x0001_0000, offset_guard 4096 -ebb0(v1: i32, v2: i64): +block0(v1: i32, v2: i64): v3 = heap_addr.i64 heap1, v1, 0 ; check: v3 = heap_addr.i64 heap1, v1, 0 return v3 @@ -75,7 +75,7 @@ function %dheap(i32, i64 vmctx) -> i64 { ; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, offset_guard 0x8000_0000 ; check: heap2 = dynamic gv5, min 0, bound gv6, offset_guard 4096 -ebb0(v1: i32, v2: i64): +block0(v1: i32, v2: i64): v3 = heap_addr.i64 heap2, v1, 0 ; check: v3 = heap_addr.i64 heap2, v1, 0 return v3 diff --git a/cranelift/filetests/filetests/parser/rewrite.clif b/cranelift/filetests/filetests/parser/rewrite.clif index c40bd9589b..a0520d25f5 100644 --- a/cranelift/filetests/filetests/parser/rewrite.clif +++ b/cranelift/filetests/filetests/parser/rewrite.clif @@ -1,16 +1,16 @@ -; It is possible to refer to instructions and EBBs that have not yet been +; It is possible to refer to instructions and blocks that have not yet been ; defined in the lexical order. test cat ; Defining numbers. function %defs() { -ebb100(v20: i32): +block100(v20: i32): v1000 = iconst.i32x8 5 v9200 = f64const 0x4.0p0 trap user4 } ; sameln: function %defs() fast { -; nextln: ebb100(v20: i32): +; nextln: block100(v20: i32): ; nextln: v1000 = iconst.i32x8 5 ; nextln: v9200 = f64const 0x1.0000000000000p2 ; nextln: trap user4 @@ -18,14 +18,14 @@ ebb100(v20: i32): ; Using values. 
function %use_value() { -ebb100(v20: i32): +block100(v20: i32): v1000 = iadd_imm v20, 5 v200 = iadd v20, v1000 - jump ebb100(v1000) + jump block100(v1000) } ; sameln: function %use_value() fast { -; nextln: ebb100(v20: i32): +; nextln: block100(v20: i32): ; nextln: v1000 = iadd_imm v20, 5 ; nextln: v200 = iadd v20, v1000 -; nextln: jump ebb100(v1000) +; nextln: jump block100(v1000) ; nextln: } diff --git a/cranelift/filetests/filetests/parser/ternary.clif b/cranelift/filetests/filetests/parser/ternary.clif index d17e5592c6..b148850198 100644 --- a/cranelift/filetests/filetests/parser/ternary.clif +++ b/cranelift/filetests/filetests/parser/ternary.clif @@ -2,7 +2,7 @@ test cat test verifier function %add_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 { -ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): +block1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): v10, v11 = iadd_ifcout v1, v4 ;check: v10, v11 = iadd_ifcout v1, v4 v20, v21 = iadd_ifcarry v2, v5, v11 @@ -13,7 +13,7 @@ ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): } function %sub_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 { -ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): +block1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): v10, v11 = isub_ifbout v1, v4 ;check: v10, v11 = isub_ifbout v1, v4 v20, v21 = isub_ifborrow v2, v5, v11 diff --git a/cranelift/filetests/filetests/parser/tiny.clif b/cranelift/filetests/filetests/parser/tiny.clif index 49628a27d6..98f477f808 100644 --- a/cranelift/filetests/filetests/parser/tiny.clif +++ b/cranelift/filetests/filetests/parser/tiny.clif @@ -2,24 +2,24 @@ test cat ; The smallest possible function. function %minimal() { -ebb0: +block0: trap user0 } ; sameln: function %minimal() fast { -; nextln: ebb0: +; nextln: block0: ; nextln: trap user0 ; nextln: } ; Create and use values. ; Polymorphic instructions with type suffix. function %ivalues() { -ebb0: +block0: v0 = iconst.i32 2 v1 = iconst.i8 6 v2 = ishl v0, v1 } ; sameln: function %ivalues() fast { -; nextln: ebb0: +; nextln: block0: ; nextln: v0 = iconst.i32 2 ; nextln: v1 = iconst.i8 6 ; nextln: v2 = ishl v0, v1 @@ -28,14 +28,14 @@ ebb0: ; Create and use values. ; Polymorphic instructions with type suffix. function %bvalues() { -ebb0: +block0: v0 = bconst.b32 true v1 = bconst.b8 false v2 = bextend.b32 v1 v3 = bxor v0, v2 } ; sameln: function %bvalues() fast { -; nextln: ebb0: +; nextln: block0: ; nextln: v0 = bconst.b32 true ; nextln: v1 = bconst.b8 false ; nextln: v2 = bextend.b32 v1 @@ -44,33 +44,33 @@ ebb0: ; Polymorphic instruction controlled by second operand. function %select() { -ebb0(v90: i32, v91: i32, v92: b1): +block0(v90: i32, v91: i32, v92: b1): v0 = select v92, v90, v91 } ; sameln: function %select() fast { -; nextln: ebb0(v90: i32, v91: i32, v92: b1): +; nextln: block0(v90: i32, v91: i32, v92: b1): ; nextln: v0 = select v92, v90, v91 ; nextln: } ; Polymorphic instruction controlled by third operand. function %selectif() system_v { -ebb0(v95: i32, v96: i32, v97: b1): +block0(v95: i32, v96: i32, v97: b1): v98 = selectif.i32 eq v97, v95, v96 } ; sameln: function %selectif() system_v { -; nextln: ebb0(v95: i32, v96: i32, v97: b1): +; nextln: block0(v95: i32, v96: i32, v97: b1): ; nextln: v98 = selectif.i32 eq v97, v95, v96 ; nextln: } ; Lane indexes. 
function %lanes() { -ebb0: +block0: v0 = iconst.i32x4 2 v1 = extractlane v0, 3 v2 = insertlane v0, 1, v1 } ; sameln: function %lanes() fast { -; nextln: ebb0: +; nextln: block0: ; nextln: v0 = iconst.i32x4 2 ; nextln: v1 = extractlane v0, 3 ; nextln: v2 = insertlane v0, 1, v1 @@ -78,31 +78,31 @@ ebb0: ; Integer condition codes. function %icmp(i32, i32) { -ebb0(v90: i32, v91: i32): +block0(v90: i32, v91: i32): v0 = icmp eq v90, v91 v1 = icmp ult v90, v91 v2 = icmp_imm sge v90, -12 v3 = irsub_imm v91, 45 - br_icmp eq v90, v91, ebb0(v91, v90) + br_icmp eq v90, v91, block0(v91, v90) } ; sameln: function %icmp(i32, i32) fast { -; nextln: ebb0(v90: i32, v91: i32): +; nextln: block0(v90: i32, v91: i32): ; nextln: v0 = icmp eq v90, v91 ; nextln: v1 = icmp ult v90, v91 ; nextln: v2 = icmp_imm sge v90, -12 ; nextln: v3 = irsub_imm v91, 45 -; nextln: br_icmp eq v90, v91, ebb0(v91, v90) +; nextln: br_icmp eq v90, v91, block0(v91, v90) ; nextln: } ; Floating condition codes. function %fcmp(f32, f32) { -ebb0(v90: f32, v91: f32): +block0(v90: f32, v91: f32): v0 = fcmp eq v90, v91 v1 = fcmp uno v90, v91 v2 = fcmp lt v90, v91 } ; sameln: function %fcmp(f32, f32) fast { -; nextln: ebb0(v90: f32, v91: f32): +; nextln: block0(v90: f32, v91: f32): ; nextln: v0 = fcmp eq v90, v91 ; nextln: v1 = fcmp uno v90, v91 ; nextln: v2 = fcmp lt v90, v91 @@ -111,12 +111,12 @@ ebb0(v90: f32, v91: f32): ; The bitcast instruction has two type variables: The controlling type variable ; controls the output type, and the input type is a free variable. function %bitcast(i32, f32) { -ebb0(v90: i32, v91: f32): +block0(v90: i32, v91: f32): v0 = bitcast.i8x4 v90 v1 = bitcast.i32 v91 } ; sameln: function %bitcast(i32, f32) fast { -; nextln: ebb0(v90: i32, v91: f32): +; nextln: block0(v90: i32, v91: f32): ; nextln: v0 = bitcast.i8x4 v90 ; nextln: v1 = bitcast.i32 v91 ; nextln: } @@ -129,7 +129,7 @@ function %stack() { ss4 = outgoing_arg 4 ss5 = emergency_slot 4 -ebb0: +block0: v1 = stack_load.i32 ss10 v2 = stack_load.i32 ss10+4 stack_store v1, ss10+2 @@ -142,7 +142,7 @@ ebb0: ; check: ss5 = emergency_slot 4 ; check: ss10 = spill_slot 8 -; check: ebb0: +; check: block0: ; nextln: v1 = stack_load.i32 ss10 ; nextln: v2 = stack_load.i32 ss10+4 ; nextln: stack_store v1, ss10+2 @@ -150,7 +150,7 @@ ebb0: ; Memory access instructions. function %memory(i32) { -ebb0(v1: i32): +block0(v1: i32): v2 = load.i64 v1 v3 = load.i64 aligned v1 v4 = load.i64 notrap v1 @@ -167,7 +167,7 @@ ebb0(v1: i32): store_complex v3, v1+v2+0x1 } ; sameln: function %memory(i32) fast { -; nextln: ebb0(v1: i32): +; nextln: block0(v1: i32): ; nextln: v2 = load.i64 v1 ; nextln: v3 = load.i64 aligned v1 ; nextln: v4 = load.i64 notrap v1 @@ -188,7 +188,7 @@ ebb0(v1: i32): function %diversion(i32) { ss0 = spill_slot 4 -ebb0(v1: i32): +block0(v1: i32): regmove v1, %10 -> %20 regmove v1, %20 -> %10 regspill v1, %10 -> ss0 @@ -197,7 +197,7 @@ ebb0(v1: i32): } ; sameln: function %diversion(i32) fast { ; nextln: ss0 = spill_slot 4 -; check: ebb0(v1: i32): +; check: block0(v1: i32): ; nextln: regmove v1, %10 -> %20 ; nextln: regmove v1, %20 -> %10 ; nextln: regspill v1, %10 -> ss0 @@ -207,20 +207,20 @@ ebb0(v1: i32): ; Register copies.
 function %copy_special() {
-ebb0:
+block0:
     copy_special %10 -> %20
     copy_special %20 -> %10
     return
 }
 ; sameln: function %copy_special() fast {
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: copy_special %10 -> %20
 ; nextln: copy_special %20 -> %10
 ; nextln: return
 ; nextln: }

 function %cond_traps(i32) {
-ebb0(v0: i32):
+block0(v0: i32):
     trapz v0, stk_ovf
     v1 = ifcmp_imm v0, 5
     trapif ugt v1, oob
@@ -230,7 +230,7 @@ ebb0(v0: i32):
     return
 }
 ; sameln: function %cond_traps(i32)
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: trapz v0, stk_ovf
 ; nextln: v1 = ifcmp_imm v0, 5
 ; nextln: trapif ugt v1, oob
diff --git a/cranelift/filetests/filetests/postopt/basic.clif b/cranelift/filetests/filetests/postopt/basic.clif
index c38065f947..4fb9e9664c 100644
--- a/cranelift/filetests/filetests/postopt/basic.clif
+++ b/cranelift/filetests/filetests/postopt/basic.clif
@@ -4,29 +4,29 @@ target i686

 ; Test that compare+branch sequences are folded effectively on x86.
 function %br_icmp(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
 [DynRexOp1icscc#39,%rdx] v2 = icmp slt v0, v1
-[Op1t8jccd_long#85] brnz v2, ebb1
-[Op1jmpb#eb] jump ebb2
+[Op1t8jccd_long#85] brnz v2, block1
+[Op1jmpb#eb] jump block2

-ebb2:
+block2:
 [Op1ret#c3] return v1

-ebb1:
+block1:
 [Op1pu_id#b8,%rax] v8 = iconst.i32 3
 [Op1ret#c3] return v8
 }
 ; sameln: function %br_icmp
-; nextln: ebb0(v0: i32, v1: i32):
+; nextln: block0(v0: i32, v1: i32):
 ; nextln: v9 = ifcmp v0, v1
 ; nextln: v2 = trueif slt v9
-; nextln: brif slt v9, ebb1
-; nextln: jump ebb2
+; nextln: brif slt v9, block1
+; nextln: jump block2
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: return v1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v8 = iconst.i32 3
 ; nextln: return v8
 ; nextln: }
@@ -34,29 +34,29 @@ ebb1:

 ; Use brz instead of brnz, so the condition is inverted.
 function %br_icmp_inverse(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
 [DynRexOp1icscc#39,%rdx] v2 = icmp slt v0, v1
-[Op1t8jccd_long#84] brz v2, ebb1
-[Op1jmpb#eb] jump ebb2
+[Op1t8jccd_long#84] brz v2, block1
+[Op1jmpb#eb] jump block2

-ebb2:
+block2:
 [Op1ret#c3] return v1

-ebb1:
+block1:
 [Op1pu_id#b8,%rax] v8 = iconst.i32 3
 [Op1ret#c3] return v8
 }
 ; sameln: function %br_icmp_inverse
-; nextln: ebb0(v0: i32, v1: i32):
+; nextln: block0(v0: i32, v1: i32):
 ; nextln: v9 = ifcmp v0, v1
 ; nextln: v2 = trueif slt v9
-; nextln: brif sge v9, ebb1
-; nextln: jump ebb2
+; nextln: brif sge v9, block1
+; nextln: jump block2
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: return v1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v8 = iconst.i32 3
 ; nextln: return v8
 ; nextln: }
@@ -64,29 +64,29 @@ ebb1:

 ; Use icmp_imm instead of icmp.
 function %br_icmp_imm(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
 [DynRexOp1icscc_ib#7083] v2 = icmp_imm slt v0, 2
-[Op1t8jccd_long#84] brz v2, ebb1
-[Op1jmpb#eb] jump ebb2
+[Op1t8jccd_long#84] brz v2, block1
+[Op1jmpb#eb] jump block2

-ebb2:
+block2:
 [Op1ret#c3] return v1

-ebb1:
+block1:
 [Op1pu_id#b8,%rax] v8 = iconst.i32 3
 [Op1ret#c3] return v8
 }
 ; sameln: function %br_icmp_imm
-; nextln: ebb0(v0: i32, v1: i32):
+; nextln: block0(v0: i32, v1: i32):
 ; nextln: v9 = ifcmp_imm v0, 2
 ; nextln: v2 = trueif slt v9
-; nextln: brif sge v9, ebb1
-; nextln: jump ebb2
+; nextln: brif sge v9, block1
+; nextln: jump block2
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: return v1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v8 = iconst.i32 3
 ; nextln: return v8
 ; nextln: }
@@ -94,30 +94,30 @@ ebb1:

 ; Use fcmp instead of icmp.
 function %br_fcmp(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
 [Op2fcscc#42e,%rdx] v2 = fcmp gt v0, v1
-[Op1t8jccd_long#84] brz v2, ebb1
-[Op1jmpb#eb] jump ebb2
+[Op1t8jccd_long#84] brz v2, block1
+[Op1jmpb#eb] jump block2

-ebb2:
+block2:
 [Op1ret#c3] return v1

-ebb1:
+block1:
 [Op1pu_id#b8,%rax] v18 = iconst.i32 0x40a8_0000
 [Mp2frurm#56e,%xmm0] v8 = bitcast.f32 v18
 [Op1ret#c3] return v8
 }
 ; sameln: function %br_fcmp
-; nextln: ebb0(v0: f32, v1: f32):
+; nextln: block0(v0: f32, v1: f32):
 ; nextln: v19 = ffcmp v0, v1
 ; nextln: v2 = trueff gt v19
-; nextln: brff ule v19, ebb1
-; nextln: jump ebb2
+; nextln: brff ule v19, block1
+; nextln: jump block2
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: return v1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v18 = iconst.i32 0x40a8_0000
 ; nextln: v8 = bitcast.f32 v18
 ; nextln: return v8
diff --git a/cranelift/filetests/filetests/postopt/complex_memory_ops.clif b/cranelift/filetests/filetests/postopt/complex_memory_ops.clif
index bae58cd8bb..43206f431c 100644
--- a/cranelift/filetests/filetests/postopt/complex_memory_ops.clif
+++ b/cranelift/filetests/filetests/postopt/complex_memory_ops.clif
@@ -2,7 +2,7 @@ test postopt
 target x86_64

 function %dual_loads(i64, i64) -> i64 {
-ebb0(v0: i64, v1: i64):
+block0(v0: i64, v1: i64):
 [DynRexOp1rr#8001] v3 = iadd v0, v1
     v4 = load.i64 v3
     v5 = uload8.i64 v3
@@ -15,7 +15,7 @@ ebb0(v0: i64, v1: i64):
 }

 ; sameln: function %dual_loads
-; nextln: ebb0(v0: i64, v1: i64):
+; nextln: block0(v0: i64, v1: i64):
 ; nextln: v3 = iadd v0, v1
 ; nextln: v4 = load_complex.i64 v0+v1
 ; nextln: v5 = uload8_complex.i64 v0+v1
@@ -28,7 +28,7 @@ ebb0(v0: i64, v1: i64):
 ; nextln: }

 function %dual_loads2(i64, i64) -> i64 {
-ebb0(v0: i64, v1: i64):
+block0(v0: i64, v1: i64):
 [DynRexOp1rr#8001] v3 = iadd v0, v1
     v4 = load.i64 v3+1
     v5 = uload8.i64 v3+1
@@ -41,7 +41,7 @@ ebb0(v0: i64, v1: i64):
 }

 ; sameln: function %dual_loads2
-; nextln: ebb0(v0: i64, v1: i64):
+; nextln: block0(v0: i64, v1: i64):
 ; nextln: v3 = iadd v0, v1
 ; nextln: v4 = load_complex.i64 v0+v1+1
 ; nextln: v5 = uload8_complex.i64 v0+v1+1
@@ -54,7 +54,7 @@ ebb0(v0: i64, v1: i64):
 ; nextln: }

 function %dual_stores(i64, i64, i64) {
-ebb0(v0: i64, v1: i64, v2: i64):
+block0(v0: i64, v1: i64, v2: i64):
 [DynRexOp1rr#8001] v3 = iadd v0, v1
 [RexOp1st#8089] store.i64 v2, v3
 [RexOp1st#88] istore8.i64 v2, v3
@@ -64,7 +64,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
 }

 ; sameln: function %dual_stores
-; nextln: ebb0(v0: i64, v1: i64, v2: i64):
+; nextln: block0(v0: i64, v1: i64, v2: i64):
 ; nextln: v3 = iadd v0, v1
 ; nextln: store_complex v2, v0+v1
 ; nextln: istore8_complex v2, v0+v1
@@ -74,7 +74,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
 ; nextln: }

 function %dual_stores2(i64, i64, i64) {
-ebb0(v0: i64, v1: i64, v2: i64):
+block0(v0: i64, v1: i64, v2: i64):
 [DynRexOp1rr#8001] v3 = iadd v0, v1
 [RexOp1stDisp8#8089] store.i64 v2, v3+1
 [RexOp1stDisp8#88] istore8.i64 v2, v3+1
@@ -84,7 +84,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
 }

 ; sameln: function %dual_stores2
-; nextln: ebb0(v0: i64, v1: i64, v2: i64):
+; nextln: block0(v0: i64, v1: i64, v2: i64):
 ; nextln: v3 = iadd v0, v1
 ; nextln: store_complex v2, v0+v1+1
 ; nextln: istore8_complex v2, v0+v1+1
diff --git a/cranelift/filetests/filetests/postopt/fold_offset_into_address.clif b/cranelift/filetests/filetests/postopt/fold_offset_into_address.clif
index 52379f4a50..1b58caed72 100644
--- a/cranelift/filetests/filetests/postopt/fold_offset_into_address.clif
+++ b/cranelift/filetests/filetests/postopt/fold_offset_into_address.clif
@@ -4,28 +4,28 @@ target x86_64

 ; Fold the immediate of an iadd_imm into an address offset.
 function u0:0(i64 vmctx) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
     v1 = iadd_imm.i64 v0, 16
 [RexOp1ldDisp8#808b] v2 = load.i64 notrap aligned v1
 [Op1ret#c3] return v2
 }

 ; sameln: function u0:0(i64 vmctx) -> i64 fast {
-; nextln: ebb0(v0: i64):
+; nextln: block0(v0: i64):
 ; nextln: v1 = iadd_imm v0, 16
 ; nextln: [RexOp1ldDisp8#808b] v2 = load.i64 notrap aligned v0+16
 ; nextln: [Op1ret#c3] return v2
 ; nextln: }

 function u0:1(i64, i64 vmctx) {
-ebb0(v3: i64, v0: i64):
+block0(v3: i64, v0: i64):
     v1 = iadd_imm.i64 v0, 16
 [RexOp1stDisp8#8089] store.i64 notrap aligned v3, v1
 [Op1ret#c3] return
 }

 ; sameln: function u0:1(i64, i64 vmctx) fast {
-; nextln: ebb0(v3: i64, v0: i64):
+; nextln: block0(v3: i64, v0: i64):
 ; nextln: v1 = iadd_imm v0, 16
 ; nextln: [RexOp1stDisp8#8089] store notrap aligned v3, v0+16
 ; nextln: [Op1ret#c3] return
diff --git a/cranelift/filetests/filetests/preopt/branch.clif b/cranelift/filetests/filetests/preopt/branch.clif
index 8e139952f9..50274c4890 100644
--- a/cranelift/filetests/filetests/preopt/branch.clif
+++ b/cranelift/filetests/filetests/preopt/branch.clif
@@ -2,78 +2,78 @@ test preopt
 target x86_64

 function %brz_fold() -> i32 {
-ebb0:
+block0:
     v0 = bconst.b1 false
-    brz v0, ebb2
-    jump ebb1
-ebb1:
+    brz v0, block2
+    jump block1
+block1:
     v1 = iconst.i32 42
     return v1
-ebb2:
+block2:
     v2 = iconst.i32 24
     return v2
 }
 ; sameln: function %brz_fold
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = bconst.b1 false
-; nextln: jump ebb2
+; nextln: jump block2
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v1 = iconst.i32 42
 ; nextln: return v1
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v2 = iconst.i32 24
 ; nextln: return v2
 ; nextln: }

 function %brnz_fold() -> i32 {
-ebb0:
+block0:
     v0 = bconst.b1 true
-    brnz v0, ebb2
-    jump ebb1
-ebb1:
+    brnz v0, block2
+    jump block1
+block1:
     v1 = iconst.i32 42
     return v1
-ebb2:
+block2:
     v2 = iconst.i32 24
     return v2
 }
 ; sameln: function %brnz_fold
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = bconst.b1 true
-; nextln: jump ebb2
+; nextln: jump block2
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v1 = iconst.i32 42
 ; nextln: return v1
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v2 = iconst.i32 24
 ; nextln: return v2
 ; nextln: }

 function %brz_fold_param(b1) -> i32 {
-ebb0(v0: b1):
-    brz v0, ebb2
-    jump ebb1
-ebb1:
+block0(v0: b1):
+    brz v0, block2
+    jump block1
+block1:
     v1 = iconst.i32 42
     return v1
-ebb2:
+block2:
     v2 = iconst.i32 24
     return v2
 }
 ; sameln: function %brz_fold_param(b1) -> i32 fast {
-; nextln: ebb0(v0: b1):
-; nextln: brz v0, ebb2
-; nextln: jump ebb1
+; nextln: block0(v0: b1):
+; nextln: brz v0, block2
+; nextln: jump block1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v1 = iconst.i32 42
 ; nextln: return v1
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v2 = iconst.i32 24
 ; nextln: return v2
 ; nextln: }
diff --git a/cranelift/filetests/filetests/preopt/constant_fold.clif b/cranelift/filetests/filetests/preopt/constant_fold.clif
index f0ea9539ba..e2cc3e4562 100644
--- a/cranelift/filetests/filetests/preopt/constant_fold.clif
+++ b/cranelift/filetests/filetests/preopt/constant_fold.clif
@@ -2,7 +2,7 @@ test preopt
 target x86_64

 function %constant_fold(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
     v1 = f64const 0x1.0000000000000p0
     v2 = f64const 0x1.0000000000000p1
     v3 = fadd v1, v2
@@ -10,7 +10,7 @@ ebb0(v0: f64):
     return v4
 }
 ; sameln: function %constant_fold(f64) -> f64 fast {
-; nextln: ebb0(v0: f64):
+; nextln: block0(v0: f64):
 ; nextln: v1 = f64const 0x1.0000000000000p0
 ; nextln: v2 = f64const 0x1.0000000000000p1
 ; nextln: v3 = f64const 0x1.8000000000000p1
diff --git a/cranelift/filetests/filetests/preopt/numerical.clif b/cranelift/filetests/filetests/preopt/numerical.clif
index 27fdaec0f7..044a3df6a0 100644
--- a/cranelift/filetests/filetests/preopt/numerical.clif
+++ b/cranelift/filetests/filetests/preopt/numerical.clif
@@ -2,7 +2,7 @@ test preopt
 target x86_64

 function %iadd_fold() -> i32 {
-ebb0:
+block0:
     v0 = iconst.i32 37
     v1 = iconst.i32 5
     v2 = iadd v0, v1
@@ -11,7 +11,7 @@ ebb0:
     return v4
 }
 ; sameln: function %iadd_fold
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 37
 ; nextln: v1 = iconst.i32 5
 ; nextln: v2 = iconst.i32 42
@@ -21,14 +21,14 @@ ebb0:
 ; nextln: }

 function %isub_fold() -> i32 {
-ebb0:
+block0:
     v0 = iconst.i32 42
     v1 = iconst.i32 1
     v2 = isub v0, v1
     return v2
 }
 ; sameln: function %isub_fold
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 42
 ; nextln: v1 = iconst.i32 1
 ; nextln: v2 = iconst.i32 41
diff --git a/cranelift/filetests/filetests/regalloc/aliases.clif b/cranelift/filetests/filetests/regalloc/aliases.clif
index 7e6d5c6028..6114298873 100644
--- a/cranelift/filetests/filetests/regalloc/aliases.clif
+++ b/cranelift/filetests/filetests/regalloc/aliases.clif
@@ -5,11 +5,11 @@ function %value_aliases(i32, f32, i64 vmctx) baldrdash_system_v {
     gv0 = vmctx
     heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

-ebb0(v0: i32, v1: f32, v2: i64):
+block0(v0: i32, v1: f32, v2: i64):
     v3 = iconst.i32 0
-    jump ebb3(v3)
+    jump block3(v3)

-ebb3(v4: i32):
+block3(v4: i32):
     v5 = heap_addr.i64 heap0, v4, 1
     v6 = load.f32 v5
     v7 -> v1
@@ -21,15 +21,15 @@ ebb3(v4: i32):
     v12 -> v0
     v13 = icmp ult v11, v12
     v14 = bint.i32 v13
-    brnz v14, ebb3(v11)
-    jump ebb4
+    brnz v14, block3(v11)
+    jump block4

-ebb4:
-    jump ebb2
+block4:
+    jump block2

-ebb2:
-    jump ebb1
+block2:
+    jump block1

-ebb1:
+block1:
     return
 }
diff --git a/cranelift/filetests/filetests/regalloc/basic.clif b/cranelift/filetests/filetests/regalloc/basic.clif
index d10ea726ce..48111253ae 100644
--- a/cranelift/filetests/filetests/regalloc/basic.clif
+++ b/cranelift/filetests/filetests/regalloc/basic.clif
@@ -6,7 +6,7 @@ target riscv32
 ; regex: RX=%x\d+

 function %add(i32, i32) {
-ebb0(v1: i32, v2: i32):
+block0(v1: i32, v2: i32):
     v3 = iadd v1, v2
     ; check: [R#0c,%x5]
     ; sameln: iadd
@@ -15,7 +15,7 @@ ebb0(v1: i32, v2: i32):

 ; Function with a dead argument.
 function %dead_arg(i32, i32) -> i32{
-ebb0(v1: i32, v2: i32):
+block0(v1: i32, v2: i32):
     ; not: regmove
     ; check: return v1
     return v1
@@ -23,7 +23,7 @@ ebb0(v1: i32, v2: i32):

 ; Return a value from a different register.
 function %move1(i32, i32) -> i32 {
-ebb0(v1: i32, v2: i32):
+block0(v1: i32, v2: i32):
     ; not: regmove
     ; check: regmove v2, %x11 -> %x10
     ; nextln: return v2
@@ -32,7 +32,7 @@ ebb0(v1: i32, v2: i32):

 ; Swap two registers.
 function %swap(i32, i32) -> i32, i32 {
-ebb0(v1: i32, v2: i32):
+block0(v1: i32, v2: i32):
     ; not: regmove
     ; check: regmove v2, %x11 -> $(tmp=$RX)
     ; nextln: regmove v1, %x10 -> %x11
@@ -41,40 +41,40 @@ ebb0(v1: i32, v2: i32):
     return v2, v1
 }

-; Return an EBB argument.
-function %retebb(i32, i32) -> i32 {
-ebb0(v1: i32, v2: i32):
-    brnz v1, ebb1(v1)
-    jump ebb1(v2)
+; Return a block argument.
+function %retblock(i32, i32) -> i32 {
+block0(v1: i32, v2: i32):
+    brnz v1, block1(v1)
+    jump block1(v2)

-ebb1(v10: i32):
+block1(v10: i32):
     return v10
 }

-; Pass an EBB argument as a function argument.
-function %callebb(i32, i32) -> i32 {
+; Pass a block argument as a function argument.
+function %callblock(i32, i32) -> i32 {
     fn0 = %foo(i32) -> i32

-ebb0(v1: i32, v2: i32):
-    brnz v1, ebb1(v1)
-    jump ebb1(v2)
+block0(v1: i32, v2: i32):
+    brnz v1, block1(v1)
+    jump block1(v2)

-ebb1(v10: i32):
+block1(v10: i32):
     v11 = call fn0(v10)
     return v11
 }

-; Pass an EBB argument as a jump argument.
-function %jumpebb(i32, i32) -> i32 {
+; Pass a block argument as a jump argument.
+function %jumpblock(i32, i32) -> i32 {
     fn0 = %foo(i32) -> i32

-ebb0(v1: i32, v2: i32):
-    brnz v1, ebb1(v1, v2)
-    jump ebb1(v2, v1)
+block0(v1: i32, v2: i32):
+    brnz v1, block1(v1, v2)
+    jump block1(v2, v1)

-ebb1(v10: i32, v11: i32):
-    jump ebb2(v10, v11)
+block1(v10: i32, v11: i32):
+    jump block2(v10, v11)

-ebb2(v20: i32, v21: i32):
+block2(v20: i32, v21: i32):
     return v21
 }
diff --git a/cranelift/filetests/filetests/regalloc/coalesce.clif b/cranelift/filetests/filetests/regalloc/coalesce.clif
index 3c139e73f6..48395da1b3 100644
--- a/cranelift/filetests/filetests/regalloc/coalesce.clif
+++ b/cranelift/filetests/filetests/regalloc/coalesce.clif
@@ -5,64 +5,64 @@ target riscv32
 ; regex: V=v\d+
 ; regex: WS=\s+
 ; regex: LOC=%\w+
-; regex: EBB=ebb\d+
+; regex: BB=block\d+

 ; This function is already CSSA, so no copies should be inserted.
 function %cssa(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
     ; not: copy
     ; v0 is used by the branch and passed as an arg - that's no conflict.
-    brnz v0, ebb1(v0)
-    jump ebb2
+    brnz v0, block1(v0)
+    jump block2

-ebb2:
+block2:
     ; v0 is live across the branch above. That's no conflict.
     v1 = iadd_imm v0, 7
-    jump ebb1(v1)
+    jump block1(v1)

-ebb1(v10: i32):
+block1(v10: i32):
     v11 = iadd_imm v10, 7
     return v11
 }

 function %trivial(i32) -> i32 {
-ebb0(v0: i32):
-    ; check: brnz v0, $(splitEdge=$EBB)
-    brnz v0, ebb1(v0)
-    jump ebb2
+block0(v0: i32):
+    ; check: brnz v0, $(splitEdge=$BB)
+    brnz v0, block1(v0)
+    jump block2

-ebb2:
+block2:
     ; not: copy
     v1 = iadd_imm v0, 7
-    jump ebb1(v1)
+    jump block1(v1)

     ; check: $splitEdge:
     ; nextln: $(cp1=$V) = copy.i32 v0
-    ; nextln: jump ebb1($cp1)
+    ; nextln: jump block1($cp1)

-ebb1(v10: i32):
-    ; Use v0 in the destination EBB causes a conflict.
+block1(v10: i32):
+    ; Using v0 in the destination block causes a conflict.
     v11 = iadd v10, v0
     return v11
 }

 ; A value is used as an SSA argument twice in the same branch.
 function %dualuse(i32) -> i32 {
-ebb0(v0: i32):
-    ; check: brnz v0, $(splitEdge=$EBB)
-    brnz v0, ebb1(v0, v0)
-    jump ebb2
+block0(v0: i32):
+    ; check: brnz v0, $(splitEdge=$BB)
+    brnz v0, block1(v0, v0)
+    jump block2

-ebb2:
+block2:
     v1 = iadd_imm v0, 7
     v2 = iadd_imm v1, 56
-    jump ebb1(v1, v2)
+    jump block1(v1, v2)

     ; check: $splitEdge:
     ; check: $(cp1=$V) = copy.i32 v0
-    ; nextln: jump ebb1($cp1, v0)
+    ; nextln: jump block1($cp1, v0)

-ebb1(v10: i32, v11: i32):
+block1(v10: i32, v11: i32):
     v12 = iadd v10, v11
     return v12
 }
@@ -70,26 +70,26 @@ ebb1(v10: i32, v11: i32):
 ; Interference away from the branch
 ; The interference can be broken with a copy at either branch.
 function %interference(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
     ; not: copy
-    ; check: brnz v0, $(splitEdge=$EBB)
+    ; check: brnz v0, $(splitEdge=$BB)
     ; not: copy
-    brnz v0, ebb1(v0)
-    jump ebb2
+    brnz v0, block1(v0)
+    jump block2

-ebb2:
+block2:
     v1 = iadd_imm v0, 7
     ; v1 and v0 interfere here:
     v2 = iadd_imm v0, 8
     ; check: $(cp0=$V) = copy v1
-    ; check: jump ebb1($cp0)
-    jump ebb1(v1)
+    ; check: jump block1($cp0)
+    jump block1(v1)

     ; check: $splitEdge:
     ; not: copy
-    ; nextln: jump ebb1(v0)
+    ; nextln: jump block1(v0)

-ebb1(v10: i32):
+block1(v10: i32):
     ; not: copy
     v11 = iadd_imm v10, 7
     return v11
@@ -97,27 +97,27 @@ ebb1(v10: i32):

 ; A loop where one induction variable is used as a backedge argument.
 function %fibonacci(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
     v1 = iconst.i32 1
     v2 = iconst.i32 2
-    jump ebb1(v1, v2)
+    jump block1(v1, v2)

-    ; check: $(splitEdge=$EBB):
+    ; check: $(splitEdge=$BB):
     ; check: $(nv11b=$V) = copy.i32 v11
     ; not: copy
-    ; check: jump ebb1($nv11b, v12)
+    ; check: jump block1($nv11b, v12)

-ebb1(v10: i32, v11: i32):
+block1(v10: i32, v11: i32):
     ; v11 needs to be isolated because it interferes with v10.
-    ; check: ebb1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
+    ; check: block1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
     ; check: v11 = copy $nv11a
     v12 = iadd v10, v11
     v13 = icmp ult v12, v0
     ; check: brnz v13, $splitEdge
-    brnz v13, ebb1(v11, v12)
-    jump ebb2
+    brnz v13, block1(v11, v12)
+    jump block2

-ebb2:
+block2:
     return v12
 }
@@ -128,30 +128,30 @@ ebb2:
 function %stackarg(i32, i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
     ; check: ss0 = incoming_arg 4
     ; not: incoming_arg
-ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32):
+block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32):
     ; check: fill v8
     ; not: v8
-    jump ebb1(v8)
+    jump block1(v8)

-ebb1(v10: i32):
+block1(v10: i32):
     v11 = iadd_imm v10, 1
     return v11
 }

 function %gvn_unremovable_phi(i32) system_v {
-ebb0(v0: i32):
+block0(v0: i32):
     v2 = iconst.i32 0
-    jump ebb2(v2, v0)
+    jump block2(v2, v0)

-ebb2(v3: i32, v4: i32):
-    brnz v3, ebb2(v3, v4)
-    jump ebb3
+block2(v3: i32, v4: i32):
+    brnz v3, block2(v3, v4)
+    jump block3

-ebb3:
+block3:
     v5 = iconst.i32 1
-    brnz v3, ebb2(v2, v5)
-    jump ebb4
+    brnz v3, block2(v2, v5)
+    jump block4

-ebb4:
+block4:
     return
 }
diff --git a/cranelift/filetests/filetests/regalloc/coalescing-207.clif b/cranelift/filetests/filetests/regalloc/coalescing-207.clif
index 6af9fcd144..39ddf0fa88 100644
--- a/cranelift/filetests/filetests/regalloc/coalescing-207.clif
+++ b/cranelift/filetests/filetests/regalloc/coalescing-207.clif
@@ -15,19 +15,19 @@ function %pr207(i64 vmctx, i32, i32) -> i32 system_v {
     fn1 = u0:0 sig1
     fn2 = u0:1 sig2

-ebb0(v0: i64, v1: i32, v2: i32):
+block0(v0: i64, v1: i32, v2: i32):
     v3 = iconst.i32 0
     v4 = iconst.i32 0
     v5 = iconst.i32 0
     v6 = iconst.i32 0x4ffe
     v7 = icmp uge v5, v6
-    brz v7, ebb1
-    jump ebb100
+    brz v7, block1
+    jump block100

-ebb100:
+block100:
     trap heap_oob

-ebb1:
+block1:
     v8 = uextend.i64 v5
     v9 = iadd_imm.i64 v0, -8
     v10 = load.i64 v9
@@ -37,13 +37,13 @@ ebb1:
     v14 = isub v12, v13
     v15 = iconst.i32 0x4ffe
     v16 = icmp.i32 uge v4, v15
-    brz v16, ebb2
-    jump ebb101
+    brz v16, block2
+    jump block101

-ebb101:
+block101:
     trap heap_oob

-ebb2:
+block2:
     v17 = uextend.i64 v4
     v18 = iadd_imm.i64 v0, -8
     v19 = load.i64 v18
@@ -51,13 +51,13 @@ ebb2:
     store.i32 v14, v20+4
     v21 = iconst.i32 0x4ffe
     v22 = icmp.i32 uge v2, v21
-    brz v22, ebb3
-    jump ebb102
+    brz v22, block3
+    jump block102

-ebb102:
+block102:
     trap heap_oob

-ebb3:
+block3:
     v23 = uextend.i64 v2
     v24 = iadd_imm.i64 v0, -8
     v25 = load.i64 v24
@@ -68,28 +68,28 @@ ebb3:
     v30 = iconst.i32 0
     v31 = icmp eq v29, v30
     v32 = bint.i32 v31
-    brnz v32, ebb90(v14, v1)
-    jump ebb103
+    brnz v32, block90(v14, v1)
+    jump block103

-ebb103:
+block103:
     v33 = call fn0(v0, v1, v27)
     v34 = iconst.i32 0
     v35 = iconst.i32 0
     v36 = icmp eq v33, v35
     v37 = bint.i32 v36
-    brnz v37, ebb90(v14, v34)
-    jump ebb104
+    brnz v37, block90(v14, v34)
+    jump block104

-ebb104:
+block104:
     v38 = iconst.i32 0x4ffe
     v39 = icmp.i32 uge v2, v38
-    brz v39, ebb4
-    jump ebb105
+    brz v39, block4
+    jump block105

-ebb105:
+block105:
     trap heap_oob

-ebb4:
+block4:
     v40 = uextend.i64 v2
     v41 = iadd_imm.i64 v0, -8
     v42 = load.i64 v41
@@ -98,19 +98,19 @@ ebb4:
     v45 = iconst.i32 0
     v46 = icmp eq v44, v45
     v47 = bint.i32 v46
-    brnz v47, ebb56(v33, v14)
-    jump ebb106
+    brnz v47, block56(v33, v14)
+    jump block106

-ebb106:
+block106:
     v48 = iconst.i32 0x4ffe
     v49 = icmp.i32 uge v33, v48
-    brz v49, ebb5
-    jump ebb107
+    brz v49, block5
+    jump block107

-ebb107:
+block107:
     trap heap_oob

-ebb5:
+block5:
     v50 = uextend.i64 v33
     v51 = iadd_imm.i64 v0, -8
     v52 = load.i64 v51
@@ -119,19 +119,19 @@ ebb5:
     v55 = iconst.i32 0
     v56 = icmp eq v54, v55
     v57 = bint.i32 v56
-    brnz v57, ebb90(v14, v34)
-    jump ebb108
+    brnz v57, block90(v14, v34)
+    jump block108

-ebb108:
+block108:
     v58 = iconst.i32 0x4ffe
     v59 = icmp.i32 uge v2, v58
-    brz v59, ebb6
-    jump ebb109
+    brz v59, block6
+    jump block109

-ebb109:
+block109:
     trap heap_oob

-ebb6:
+block6:
     v60 = uextend.i64 v2
     v61 = iadd_imm.i64 v0, -8
     v62 = load.i64 v61
@@ -140,19 +140,19 @@ ebb6:
     v65 = iconst.i32 0
     v66 = icmp eq v64, v65
     v67 = bint.i32 v66
-    brnz v67, ebb42
-    jump ebb110
+    brnz v67, block42
+    jump block110

-ebb110:
+block110:
     v68 = iconst.i32 0x4ffe
     v69 = icmp.i32 uge v33, v68
-    brz v69, ebb7
-    jump ebb111
+    brz v69, block7
+    jump block111

-ebb111:
+block111:
     trap heap_oob

-ebb7:
+block7:
     v70 = uextend.i64 v33
     v71 = iadd_imm.i64 v0, -8
     v72 = load.i64 v71
@@ -161,19 +161,19 @@ ebb7:
     v75 = iconst.i32 0
     v76 = icmp eq v74, v75
     v77 = bint.i32 v76
-    brnz v77, ebb90(v14, v34)
-    jump ebb112
+    brnz v77, block90(v14, v34)
+    jump block112

-ebb112:
+block112:
     v78 = iconst.i32 0x4ffe
     v79 = icmp.i32 uge v2, v78
-    brz v79, ebb8
-    jump ebb113
+    brz v79, block8
+    jump block113

-ebb113:
+block113:
     trap heap_oob

-ebb8:
+block8:
     v80 = uextend.i64 v2
     v81 = iadd_imm.i64 v0, -8
     v82 = load.i64 v81
@@ -182,19 +182,19 @@ ebb8:
     v85 = iconst.i32 0
     v86 = icmp eq v84, v85
     v87 = bint.i32 v86
-    brnz v87, ebb46
-    jump ebb114
+    brnz v87, block46
+    jump block114

-ebb114:
+block114:
     v88 = iconst.i32 0x4ffe
     v89 = icmp.i32 uge v33, v88
-    brz v89, ebb9
-    jump ebb115
+    brz v89, block9
+    jump block115

-ebb115:
+block115:
     trap heap_oob

-ebb9:
+block9:
     v90 = uextend.i64 v33
     v91 = iadd_imm.i64 v0, -8
     v92 = load.i64 v91
@@ -203,19 +203,19 @@ ebb9:
     v95 = iconst.i32 0
     v96 = icmp eq v94, v95
     v97 = bint.i32 v96
-    brnz v97, ebb90(v14, v34)
-    jump ebb116
+    brnz v97, block90(v14, v34)
+    jump block116

-ebb116:
+block116:
     v98 = iconst.i32 0x4ffe
     v99 = icmp.i32 uge v2, v98
-    brz v99, ebb10
-    jump ebb117
+    brz v99, block10
+    jump block117

-ebb117:
+block117:
     trap heap_oob

-ebb10:
+block10:
     v100 = uextend.i64 v2
     v101 = iadd_imm.i64 v0, -8
     v102 = load.i64 v101
@@ -224,10 +224,10 @@ ebb10:
     v105 = iconst.i32 0
     v106 = icmp eq v104, v105
     v107 = bint.i32 v106
-    brnz v107, ebb54
-    jump ebb118
+    brnz v107, block54
+    jump block118

-ebb118:
+block118:
     v108 = iconst.i32 1
     v109 = iadd.i32 v2, v108
     v110 = iconst.i32 1048
@@ -235,13 +235,13 @@ ebb118:
     v112 = iconst.i64 0
     v113 = iconst.i32 0x4ffe
     v114 = icmp uge v111, v113
-    brz v114, ebb11
-    jump ebb119
+    brz v114, block11
+    jump block119

-ebb119:
+block119:
     trap heap_oob

-ebb11:
+block11:
     v115 = uextend.i64 v111
     v116 = iadd_imm.i64 v0, -8
     v117 = load.i64 v116
@@ -252,13 +252,13 @@ ebb11:
     v121 = iconst.i64 0
     v122 = iconst.i32 0x4ffe
     v123 = icmp uge v120, v122
-    brz v123, ebb12
-    jump ebb120
+    brz v123, block12
+    jump block120

-ebb120:
+block120:
     trap heap_oob

-ebb12:
+block12:
     v124 = uextend.i64 v120
     v125 = iadd_imm.i64 v0, -8
     v126 = load.i64 v125
@@ -267,13 +267,13 @@ ebb12:
     v128 = iconst.i64 0
     v129 = iconst.i32 0x4ffe
     v130 = icmp.i32 uge v14, v129
-    brz v130, ebb13
-    jump ebb121
+    brz v130, block13
+    jump block121

-ebb121:
+block121:
     trap heap_oob

-ebb13:
+block13:
     v131 = uextend.i64 v14
     v132 = iadd_imm.i64 v0, -8
     v133 = load.i64 v132
@@ -282,34 +282,34 @@ ebb13:
     v135 = iconst.i64 0
     v136 = iconst.i32 0x4ffe
     v137 = icmp.i32 uge v14, v136
-    brz v137, ebb14
-    jump ebb122
+    brz v137, block14
+    jump block122

-ebb122:
+block122:
     trap heap_oob

-ebb14:
+block14:
     v138 = uextend.i64 v14
     v139 = iadd_imm.i64 v0, -8
     v140 = load.i64 v139
     v141 = iadd v140, v138
     store.i64 v135, v141+1024
     v142 = iconst.i32 -1
-    jump ebb15(v142, v27)
+    jump block15(v142, v27)

-ebb15(v143: i32, v144: i32):
+block15(v143: i32, v144: i32):
     v145 = iadd.i32 v33, v143
     v146 = iconst.i32 1
     v147 = iadd v145, v146
     v148 = iconst.i32 0x4ffe
     v149 = icmp uge v147, v148
-    brz v149, ebb16
-    jump ebb123
+    brz v149, block16
+    jump block123

-ebb123:
+block123:
     trap heap_oob

-ebb16:
+block16:
     v150 = uextend.i64 v147
     v151 = iadd_imm.i64 v0, -8
     v152 = load.i64 v151
@@ -318,10 +318,10 @@ ebb16:
     v155 = iconst.i32 0
     v156 = icmp eq v154, v155
     v157 = bint.i32 v156
-    brnz v157, ebb89(v14)
-    jump ebb124
+    brnz v157, block89(v14)
+    jump block124

-ebb124:
+block124:
     v158 = iconst.i32 255
     v159 = band.i32 v144, v158
     v160 = iconst.i32 2
@@ -331,13 +331,13 @@ ebb124:
     v164 = iadd.i32 v143, v163
     v165 = iconst.i32 0x4ffe
     v166 = icmp uge v162, v165
-    brz v166, ebb17
-    jump ebb125
+    brz v166, block17
+    jump block125

-ebb125:
+block125:
     trap heap_oob

-ebb17:
+block17:
     v167 = uextend.i64 v162
     v168 = iadd_imm.i64 v0, -8
     v169 = load.i64 v168
@@ -352,13 +352,13 @@ ebb17:
     v177 = iadd v172, v176
     v178 = iconst.i32 0x4ffe
     v179 = icmp uge v177, v178
-    brz v179, ebb18
-    jump ebb126
+    brz v179, block18
+    jump block126

-ebb126:
+block126:
     trap heap_oob

-ebb18:
+block18:
     v180 = uextend.i64 v177
     v181 = iadd_imm.i64 v0, -8
     v182 = load.i64 v181
@@ -371,13 +371,13 @@ ebb18:
     v189 = bor v184, v188
     v190 = iconst.i32 0x4ffe
     v191 = icmp.i32 uge v177, v190
-    brz v191, ebb19
-    jump ebb127
+    brz v191, block19
+    jump block127

-ebb127:
+block127:
     trap heap_oob

-ebb19:
+block19:
     v192 = uextend.i64 v177
     v193 = iadd_imm.i64 v0, -8
     v194 = load.i64 v193
@@ -390,22 +390,22 @@ ebb19:
     v200 = iadd v196, v199
     v201 = iconst.i32 0x4ffe
     v202 = icmp uge v200, v201
-    brz v202, ebb20
-    jump ebb128
+    brz v202, block20
+    jump block128

-ebb128:
+block128:
     trap heap_oob

-ebb20:
+block20:
     v203 = uextend.i64 v200
     v204 = iadd_imm.i64 v0, -8
     v205 = load.i64 v204
     v206 = iadd v205, v203
     v207 = uload8.i32 v206
-    brnz v207, ebb15(v198, v207)
-    jump ebb21
+    brnz v207, block15(v198, v207)
+    jump block21

-ebb21:
+block21:
     v208 = iconst.i32 -1
     v209 = iconst.i32 1
     v210 = iconst.i32 -1
@@ -415,31 +415,31 @@ ebb21:
     v214 = iconst.i32 2
     v215 = icmp ult v213, v214
     v216 = bint.i32 v215
-    brnz v216, ebb38(v2, v211, v209, v210, v208, v198, v213, v33, v14)
-    jump ebb129
+    brnz v216, block38(v2, v211, v209, v210, v208, v198, v213, v33, v14)
+    jump block129

-ebb129:
+block129:
     v217 = iconst.i32 -1
     v218 = iconst.i32 0
     v219 = iconst.i32 1
     v220 = iconst.i32 1
     v221 = iconst.i32 1
     v222 = copy.i32 v44
-    jump ebb22(v217, v221, v44, v220, v218, v219, v213, v222, v198, v33, v14)
+    jump block22(v217, v221, v44, v220, v218, v219, v213, v222, v198, v33, v14)

-ebb22(v223: i32, v224: i32, v225: i32, v226: i32, v227: i32, v228: i32, v229: i32, v230: i32, v231: i32, v232: i32, v233: i32):
+block22(v223: i32, v224: i32, v225: i32, v226: i32, v227: i32, v228: i32, v229: i32, v230: i32, v231: i32, v232: i32, v233: i32):
     v234 = copy v228
     v235 = iadd v223, v224
     v236 = iadd.i32 v2, v235
     v237 = iconst.i32 0x4ffe
     v238 = icmp uge v236, v237
-    brz v238, ebb23
-    jump ebb130
+    brz v238, block23
+    jump block130

-ebb130:
+block130:
     trap heap_oob

-ebb23:
+block23:
     v239 = uextend.i64 v236
     v240 = iadd_imm.i64 v0, -8
     v241 = load.i64 v240
@@ -449,95 +449,95 @@ ebb23:
     v245 = band.i32 v225, v244
     v246 = icmp ne v243, v245
     v247 = bint.i32 v246
-    brnz v247, ebb24
-    jump ebb131
+    brnz v247, block24
+    jump block131

-ebb131:
+block131:
     v248 = icmp.i32 ne v224, v226
     v249 = bint.i32 v248
-    brnz v249, ebb25
-    jump ebb132
+    brnz v249, block25
+    jump block132

-ebb132:
+block132:
     v250 = iadd.i32 v227, v226
     v251 = iconst.i32 1
-    jump ebb27(v251, v250, v223, v226)
+    jump block27(v251, v250, v223, v226)

-ebb24:
+block24:
     v252 = icmp.i32 ule v243, v245
     v253 = bint.i32 v252
-    brnz v253, ebb26
-    jump ebb133
+    brnz v253, block26
+    jump block133

-ebb133:
+block133:
     v254 = isub.i32 v234, v223
     v255 = iconst.i32 1
-    jump ebb27(v255, v234, v223, v254)
+    jump block27(v255, v234, v223, v254)

-ebb25:
+block25:
     v256 = iconst.i32 1
     v257 = iadd.i32 v224, v256
     v258 = copy.i32 v227
-    jump ebb27(v257, v258, v223, v226)
+    jump block27(v257, v258, v223, v226)

-ebb26:
+block26:
     v259 = iconst.i32 1
     v260 = iconst.i32 1
     v261 = iadd.i32 v227, v260
     v262 = iconst.i32 1
     v263 = copy.i32 v227
-    jump ebb27(v259, v261, v263, v262)
+    jump block27(v259, v261, v263, v262)

-ebb27(v264: i32, v265: i32, v266: i32, v267: i32):
+block27(v264: i32, v265: i32, v266: i32, v267: i32):
     v268 = iadd v264, v265
     v269 = icmp uge v268, v229
     v270 = bint.i32 v269
-    brnz v270, ebb29
-    jump ebb134
+    brnz v270, block29
+    jump block134

-ebb134:
+block134:
     v271 = iadd.i32 v2, v268
     v272 = iconst.i32 0x4ffe
     v273 = icmp uge v271, v272
-    brz v273, ebb28
-    jump ebb135
+    brz v273, block28
+    jump block135

-ebb135:
+block135:
     trap heap_oob

-ebb28:
+block28:
     v274 = uextend.i64 v271
     v275 = iadd_imm.i64 v0, -8
     v276 = load.i64 v275
     v277 = iadd v276, v274
     v278 = uload8.i32 v277
     v279 = copy.i32 v265
-    jump ebb22(v266, v264, v278, v267, v279, v268, v229, v230, v231, v232, v233)
+    jump block22(v266, v264, v278, v267, v279, v268, v229, v230, v231, v232, v233)

-ebb29:
-    jump ebb30
+block29:
+    jump block30

-ebb30:
+block30:
     v280 = iconst.i32 -1
     v281 = iconst.i32 0
     v282 = iconst.i32 1
     v283 = iconst.i32 1
     v284 = iconst.i32 1
-    jump ebb31(v280, v284, v230, v283, v281, v282, v229, v267, v266, v231, v232, v233)
+    jump block31(v280, v284, v230, v283, v281, v282, v229, v267, v266, v231, v232, v233)

-ebb31(v285: i32, v286: i32, v287: i32, v288: i32, v289: i32, v290: i32, v291: i32, v292: i32, v293: i32, v294: i32, v295: i32, v296: i32):
+block31(v285: i32, v286: i32, v287: i32, v288: i32, v289: i32, v290: i32, v291: i32, v292: i32, v293: i32, v294: i32, v295: i32, v296: i32):
     v297 = copy v290
     v298 = iadd v285, v286
     v299 = iadd.i32 v2, v298
     v300 = iconst.i32 0x4ffe
     v301 = icmp uge v299, v300
-    brz v301, ebb32
-    jump ebb136
+    brz v301, block32
+    jump block136

-ebb136:
+block136:
     trap heap_oob

-ebb32:
+block32:
     v302 = uextend.i64 v299
     v303 = iadd_imm.i64 v0, -8
     v304 = load.i64 v303
@@ -547,105 +547,105 @@ ebb32:
     v308 = band.i32 v287, v307
     v309 = icmp ne v306, v308
     v310 = bint.i32 v309
-    brnz v310, ebb33
-    jump ebb137
+    brnz v310, block33
+    jump block137

-ebb137:
+block137:
     v311 = icmp.i32 ne v286, v288
     v312 = bint.i32 v311
-    brnz v312, ebb34
-    jump ebb138
+    brnz v312, block34
+    jump block138

-ebb138:
+block138:
     v313 = iadd.i32 v289, v288
     v314 = iconst.i32 1
-    jump ebb36(v314, v313, v285, v288)
+    jump block36(v314, v313, v285, v288)

-ebb33:
+block33:
     v315 = icmp.i32 uge v306, v308
     v316 = bint.i32 v315
-    brnz v316, ebb35
-    jump ebb139
+    brnz v316, block35
+    jump block139

-ebb139:
+block139:
     v317 = isub.i32 v297, v285
     v318 = iconst.i32 1
-    jump ebb36(v318, v297, v285, v317)
+    jump block36(v318, v297, v285, v317)

-ebb34:
+block34:
     v319 = iconst.i32 1
     v320 = iadd.i32 v286, v319
     v321 = copy.i32 v289
-    jump ebb36(v320, v321, v285, v288)
+    jump block36(v320, v321, v285, v288)

-ebb35:
+block35:
     v322 = iconst.i32 1
     v323 = iconst.i32 1
     v324 = iadd.i32 v289, v323
     v325 = iconst.i32 1
     v326 = copy.i32 v289
-    jump ebb36(v322, v324, v326, v325)
+    jump block36(v322, v324, v326, v325)

-ebb36(v327: i32, v328: i32, v329: i32, v330: i32):
+block36(v327: i32, v328: i32, v329: i32, v330: i32):
     v331 = iadd v327, v328
     v332 = icmp uge v331, v291
     v333 = bint.i32 v332
-    brnz v333, ebb38(v2, v330, v292, v329, v293, v294, v291, v295, v296)
-    jump ebb140
+    brnz v333, block38(v2, v330, v292, v329, v293, v294, v291, v295, v296)
+    jump block140

-ebb140:
+block140:
     v334 = iadd.i32 v2, v331
     v335 = iconst.i32 0x4ffe
     v336 = icmp uge v334, v335
-    brz v336, ebb37
-    jump ebb141
+    brz v336, block37
+    jump block141

-ebb141:
+block141:
     trap heap_oob

-ebb37:
+block37:
     v337 = uextend.i64 v334
     v338 = iadd_imm.i64 v0, -8
     v339 = load.i64 v338
     v340 = iadd v339, v337
     v341 = uload8.i32 v340
     v342 = copy.i32 v328
-    jump ebb31(v329, v327, v341, v330, v342, v331, v291, v292, v293, v294, v295, v296)
+    jump block31(v329, v327, v341, v330, v342, v331, v291, v292, v293, v294, v295, v296)

-ebb38(v343: i32, v344: i32, v345: i32, v346: i32, v347: i32, v348: i32, v349: i32, v350: i32, v351: i32):
+block38(v343: i32, v344: i32, v345: i32, v346: i32, v347: i32, v348: i32, v349: i32, v350: i32, v351: i32):
     v352 = iconst.i32 1
     v353 = iadd v346, v352
     v354 = iconst.i32 1
     v355 = iadd v347, v354
     v356 = icmp ugt v353, v355
     v357 = bint.i32 v356
-    brnz v357, ebb39(v344)
-    jump ebb142
+    brnz v357, block39(v344)
+    jump block142

-ebb142:
+block142:
     v358 = copy v345
-    jump ebb39(v358)
+    jump block39(v358)

-ebb39(v359: i32):
+block39(v359: i32):
     v360 = iadd.i32 v343, v359
-    brnz.i32 v357, ebb40(v346)
-    jump ebb143
+    brnz.i32 v357, block40(v346)
+    jump block143

-ebb143:
+block143:
     v361 = copy.i32 v347
-    jump ebb40(v361)
+    jump block40(v361)

-ebb40(v362: i32):
+block40(v362: i32):
     v363 = iconst.i32 1
     v364 = iadd v362, v363
     v365 = call fn1(v0, v343, v360, v364)
     v366 = iconst.i32 0
     v367 = icmp eq v365, v366
     v368 = bint.i32 v367
-    brnz v368, ebb63
-    jump ebb144
+    brnz v368, block63
+    jump block144

-ebb144:
+block144:
     v369 = iconst.i32 1
     v370 = iadd v362, v369
     v371 = isub.i32 v348, v370
@@ -654,40 +654,40 @@ ebb144:
     v374 = icmp ugt v362, v373
     v375 = bint.i32 v374
     v376 = copy v362
-    brnz v375, ebb41(v376)
-    jump ebb145
+    brnz v375, block41(v376)
+    jump block145

-ebb145:
+block145:
     v377 = copy v373
-    jump ebb41(v377)
+    jump block41(v377)

-ebb41(v378: i32):
+block41(v378: i32):
     v379 = iconst.i32 1
     v380 = iadd v378, v379
     v381 = iconst.i32 0
-    jump ebb64(v380, v381)
+    jump block64(v380, v381)

-ebb42:
+block42:
     v382 = iconst.i32 8
     v383 = ishl.i32 v29, v382
     v384 = bor v383, v44
     v385 = iconst.i32 0x4ffe
     v386 = icmp.i32 uge v33, v385
-    brz v386, ebb43
-    jump ebb146
+    brz v386, block43
+    jump block146

-ebb146:
+block146:
     trap heap_oob

-ebb43:
+block43:
     v387 = uextend.i64 v33
     v388 = iadd_imm.i64 v0, -8
     v389 = load.i64 v388
     v390 = iadd v389, v387
     v391 = uload8.i32 v390
-    jump ebb44(v391, v54, v33)
+    jump block44(v391, v54, v33)

-ebb44(v392: i32, v393: i32, v394: i32):
+block44(v392: i32, v393: i32, v394: i32):
     v395 = iconst.i32 8
     v396 = ishl v392, v395
     v397 = iconst.i32 0xff00
@@ -697,32 +697,32 @@ ebb44(v392: i32, v393: i32, v394: i32):
     v401 = bor v398, v400
     v402 = icmp eq v401, v384
     v403 = bint.i32 v402
-    brnz v403, ebb56(v394, v14)
-    jump ebb147
+    brnz v403, block56(v394, v14)
+    jump block147

-ebb147:
+block147:
     v404 = iconst.i32 2
     v405 = iadd v394, v404
     v406 = iconst.i32 1
     v407 = iadd v394, v406
     v408 = iconst.i32 0x4ffe
     v409 = icmp uge v405, v408
-    brz v409, ebb45
-    jump ebb148
+    brz v409, block45
+    jump block148

-ebb148:
+block148:
     trap heap_oob

-ebb45:
+block45:
     v410 = uextend.i64 v405
     v411 = iadd_imm.i64 v0, -8
     v412 = load.i64 v411
     v413 = iadd v412, v410
     v414 = uload8.i32 v413
-    brnz v414, ebb44(v401, v414, v407)
-    jump ebb90(v14, v34)
+    brnz v414, block44(v401, v414, v407)
+    jump block90(v14, v34)

-ebb46:
+block46:
     v415 = iconst.i32 8
     v416 = ishl.i32 v74, v415
     v417 = iconst.i32 16
@@ -730,13 +730,13 @@ ebb46:
     v419 = bor v416, v418
     v420 = iconst.i32 0x4ffe
     v421 = icmp.i32 uge v33, v420
-    brz v421, ebb47
-    jump ebb149
+    brz v421, block47
+    jump block149

-ebb149:
+block149:
     trap heap_oob

-ebb47:
+block47:
     v422 = uextend.i64 v33
     v423 = iadd_imm.i64 v0, -8
     v424 = load.i64 v423
@@ -755,23 +755,23 @@ ebb47:
     v437 = bor v434, v436
     v438 = icmp eq v429, v437
     v439 = bint.i32 v438
-    brnz v439, ebb56(v33, v14)
-    jump ebb48(v33, v429)
+    brnz v439, block56(v33, v14)
+    jump block48(v33, v429)

-ebb48(v440: i32, v441: i32):
+block48(v440: i32, v441: i32):
     v442 = iconst.i32 1
     v443 = iadd v440, v442
     v444 = iconst.i32 3
     v445 = iadd v440, v444
     v446 = iconst.i32 0x4ffe
     v447 = icmp uge v445, v446
-    brz v447, ebb49
-    jump ebb150
+    brz v447, block49
+    jump block150

-ebb150:
+block150:
     trap heap_oob

-ebb49:
+block49:
     v448 = uextend.i64 v445
     v449 = iadd_imm.i64 v0, -8
     v450 = load.i64 v449
@@ -780,52 +780,52 @@ ebb49:
     v453 = iconst.i32 0
     v454 = icmp eq v452, v453
     v455 = bint.i32 v454
-    brnz v455, ebb51(v14)
-    jump ebb151
+    brnz v455, block51(v14)
+    jump block151

-ebb151:
+block151:
     v456 = bor.i32 v441, v452
     v457 = iconst.i32 8
     v458 = ishl v456, v457
     v459 = icmp ne v458, v437
     v460 = bint.i32 v459
     v461 = copy.i32 v443
-    brnz v460, ebb48(v461, v458)
-    jump ebb50
+    brnz v460, block48(v461, v458)
+    jump block50

-ebb50:
-    jump ebb51(v14)
+block50:
+    jump block51(v14)

-ebb51(v462: i32):
+block51(v462: i32):
     v463 = iconst.i32 0
     v464 = iconst.i32 1056
     v465 = iadd v462, v464
     v466 = iconst.i32 0x4ffe
     v467 = icmp uge v463, v466
-    brz v467, ebb52
-    jump ebb152
+    brz v467, block52
+    jump block152

-ebb152:
+block152:
     trap heap_oob

-ebb52:
+block52:
     v468 = uextend.i64 v463
     v469 = iadd_imm.i64 v0, -8
     v470 = load.i64 v469
     v471 = iadd v470, v468
     store.i32 v465, v471+4
     v472 = iconst.i32 0
-    brnz.i32 v452, ebb53(v443)
-    jump ebb153
+    brnz.i32 v452, block53(v443)
+    jump block153

-ebb153:
+block153:
     v473 = copy v472
-    jump ebb53(v473)
+    jump block53(v473)

-ebb53(v474: i32):
+block53(v474: i32):
     return v474

-ebb54:
+block54:
     v475 = iconst.i32 8
     v476 = ishl.i32 v74, v475
     v477 = iconst.i32 16
@@ -834,13 +834,13 @@ ebb54:
     v480 = bor v479, v94
     v481 = iconst.i32 0x4ffe
     v482 = icmp.i32 uge v33, v481
-    brz v482, ebb55
-    jump ebb154
+    brz v482, block55
+    jump block154

-ebb154:
+block154:
     trap heap_oob

-ebb55:
+block55:
     v483 = uextend.i64 v33
     v484 = iadd_imm.i64 v0, -8
     v485 = load.i64 v484
@@ -860,30 +860,30 @@ ebb55:
     v499 = bor v498, v84
     v500 = icmp ne v490, v499
     v501 = bint.i32 v500
-    brnz v501, ebb57
-    jump ebb56(v33, v14)
+    brnz v501, block57
+    jump block56(v33, v14)

-ebb56(v502: i32, v503: i32):
+block56(v502: i32, v503: i32):
     v504 = copy v502
-    jump ebb90(v503, v504)
+    jump block90(v503, v504)

-ebb57:
-    jump ebb58(v33, v490)
+block57:
+    jump block58(v33, v490)

-ebb58(v505: i32, v506: i32):
+block58(v505: i32, v506: i32):
     v507 = iconst.i32 4
     v508 = iadd v505, v507
     v509 = iconst.i32 1
     v510 = iadd v505, v509
     v511 = iconst.i32 0x4ffe
     v512 = icmp uge v508, v511
-    brz v512, ebb59
-    jump ebb155
+    brz v512, block59
+    jump block155

-ebb155:
+block155:
     trap heap_oob

-ebb59:
+block59:
     v513 = uextend.i64 v508
     v514 = iadd_imm.i64 v0, -8
     v515 = load.i64 v514
@@ -892,41 +892,41 @@ ebb59:
     v518 = iconst.i32 0
     v519 = icmp eq v517, v518
     v520 = bint.i32 v519
-    brnz v520, ebb61(v14)
-    jump ebb156
+    brnz v520, block61(v14)
+    jump block156

-ebb156:
+block156:
     v521 = iconst.i32 8
     v522 = ishl.i32 v506, v521
     v523 = bor v522, v517
     v524 = icmp ne v523, v499
     v525 = bint.i32 v524
-    brnz v525, ebb58(v510, v523)
-    jump ebb60
+    brnz v525, block58(v510, v523)
+    jump block60

-ebb60:
-    jump ebb61(v14)
+block60:
+    jump block61(v14)

-ebb61(v526: i32):
+block61(v526: i32):
     v527 = iconst.i32 0
-    brnz.i32 v517, ebb62(v510)
-    jump ebb157
+    brnz.i32 v517, block62(v510)
+    jump block157

-ebb157:
+block157:
     v528 = copy v527
-    jump ebb62(v528)
+    jump block62(v528)

-ebb62(v529: i32):
+block62(v529: i32):
     v530 = copy v529
-    jump ebb90(v526, v530)
+    jump block90(v526, v530)

-ebb63:
+block63:
     v531 = isub.i32 v348, v359
     v532 = iconst.i32 1
     v533 = iadd v531, v532
-    jump ebb64(v359, v533)
+    jump block64(v359, v533)

-ebb64(v534: i32, v535: i32):
+block64(v534: i32, v535: i32):
     v536 = iconst.i32 1
     v537 = iadd.i32 v343, v536
     v538 = iconst.i32 0
@@ -938,49 +938,49 @@ ebb64(v534: i32, v535: i32):
     v544 = iadd v542, v543
     v545 = iconst.i32 0
     v546 = copy.i32 v350
-    jump ebb65(v350, v546, v349, v541, v348, v351, v544, v534, v545, v535, v343, v364, v537, v539, v362)
+    jump block65(v350, v546, v349, v541, v348, v351, v544, v534, v545, v535, v343, v364, v537, v539, v362)

-ebb65(v547: i32, v548: i32, v549: i32, v550: i32, v551: i32, v552: i32, v553: i32, v554: i32, v555: i32, v556: i32, v557: i32, v558: i32, v559: i32, v560: i32, v561: i32):
+block65(v547: i32, v548: i32, v549: i32, v550: i32, v551: i32, v552: i32, v553: i32, v554: i32, v555: i32, v556: i32, v557: i32, v558: i32, v559: i32, v560: i32, v561: i32):
     v562 = copy v556
     v563 = isub v547, v548
     v564 = icmp uge v563, v549
     v565 = bint.i32 v564
-    brnz v565, ebb67(v547)
-    jump ebb158
+    brnz v565, block67(v547)
+    jump block158

-ebb158:
+block158:
     v566 = iconst.i32 0
     v567 = call fn2(v0, v547, v566, v550)
-    brnz v567, ebb66
-    jump ebb159
+    brnz v567, block66
+    jump block159

-ebb159:
+block159:
     v568 = iadd v547, v550
-    jump ebb67(v568)
+    jump block67(v568)

-ebb66:
+block66:
     v569 = isub.i32 v567, v548
     v570 = icmp ult v569, v549
     v571 = bint.i32 v570
-    brnz v571, ebb89(v552)
-    jump ebb160
+    brnz v571, block89(v552)
+    jump block160

-ebb160:
+block160:
     v572 = copy.i32 v567
-    jump ebb67(v572)
+    jump block67(v572)

-ebb67(v573: i32):
+block67(v573: i32):
     v574 = iconst.i32 1
     v575 = iadd.i32 v548, v551
     v576 = iconst.i32 0x4ffe
     v577 = icmp uge v575, v576
-    brz v577, ebb68
-    jump ebb161
+    brz v577, block68
+    jump block161

-ebb161:
+block161:
     trap heap_oob

-ebb68:
+block68:
     v578 = uextend.i64 v575
     v579 = iadd_imm.i64 v0, -8
     v580 = load.i64 v579
@@ -998,13 +998,13 @@ ebb68:
     v592 = iadd v587, v591
     v593 = iconst.i32 0x4ffe
     v594 = icmp uge v592, v593
-    brz v594, ebb69
-    jump ebb162
+    brz v594, block69
+    jump block162

-ebb162:
+block162:
     trap heap_oob

-ebb69:
+block69:
     v595 = uextend.i64 v592
     v596 = iadd_imm.i64 v0, -8
     v597 = load.i64 v596
@@ -1014,22 +1014,22 @@ ebb69:
     v601 = iconst.i32 0
     v602 = icmp eq v600, v601
     v603 = bint.i32 v602
-    brnz v603, ebb74
-    jump ebb163
+    brnz v603, block74
+    jump block163

-ebb163:
+block163:
     v604 = iconst.i32 2
     v605 = ishl.i32 v582, v604
     v606 = iadd.i32 v552, v605
     v607 = iconst.i32 0x4ffe
     v608 = icmp uge v606, v607
-    brz v608, ebb70
-    jump ebb164
+    brz v608, block70
+    jump block164

-ebb164:
+block164:
     trap heap_oob

-ebb70:
+block70:
     v609 = uextend.i64 v606
     v610 = iadd_imm.i64 v0, -8
     v611 = load.i64 v610
@@ -1039,64 +1039,64 @@ ebb70:
     v615 = iconst.i32 -1
     v616 = icmp eq v614, v615
     v617 = bint.i32 v616
-    brnz v617, ebb75
-    jump ebb165
+    brnz v617, block75
+    jump block165

-ebb165:
+block165:
     v618 = iconst.i32 1
     v619 = iadd v614, v618
     v620 = icmp ult v619, v554
     v621 = bint.i32 v620
     v622 = copy.i32 v553
-    brnz v621, ebb71(v622)
-    jump ebb166
+    brnz v621, block71(v622)
+    jump block166

-ebb166:
+block166:
     v623 = copy v619
-    jump ebb71(v623)
+    jump block71(v623)

-ebb71(v624: i32):
+block71(v624: i32):
     v625 = copy v624
-    brnz.i32 v555, ebb72(v625)
-    jump ebb72(v619)
+    brnz.i32 v555, block72(v625)
+    jump block72(v619)

-ebb72(v626: i32):
-    brnz.i32 v562, ebb73(v626)
-    jump ebb73(v619)
+block72(v626: i32):
+    brnz.i32 v562, block73(v626)
+    jump block73(v619)

-ebb73(v627: i32):
+block73(v627: i32):
     v628 = copy.i32 v554
     v629 = copy.i32 v562
-    jump ebb87(v548, v627, v573, v549, v550, v551, v552, v553, v628, v629, v557, v558, v559, v560, v561)
+    jump block87(v548, v627, v573, v549, v550, v551, v552, v553, v628, v629, v557, v558, v559, v560, v561)

-ebb74:
+block74:
     v630 = copy.i32 v549
     v631 = copy.i32 v554
     v632 = copy.i32 v562
-    jump ebb87(v548, v630, v573, v549, v550, v551, v552, v553, v631, v632, v557, v558, v559, v560, v561)
+    jump block87(v548, v630, v573, v549, v550, v551, v552, v553, v631, v632, v557, v558, v559, v560, v561)

-ebb75:
+block75:
     v633 = icmp.i32 ugt v558, v555
     v634 = bint.i32 v633
     v635 = copy.i32 v558
-    brnz v634, ebb76(v635)
-    jump ebb167
+    brnz v634, block76(v635)
+    jump block167

-ebb167:
+block167:
     v636 = copy.i32 v555
-    jump ebb76(v636)
+    jump block76(v636)

-ebb76(v637: i32):
+block76(v637: i32):
     v638 = iadd.i32 v557, v637
     v639 = iconst.i32 0x4ffe
     v640 = icmp uge v638, v639
-    brz v640, ebb77
-    jump ebb168
+    brz v640, block77
+    jump block168

-ebb168:
+block168:
     trap heap_oob

-ebb77:
+block77:
     v641 = uextend.i64 v638
     v642 = iadd_imm.i64 v0, -8
     v643 = load.i64 v642
@@ -1105,27 +1105,27 @@ ebb77:
     v646 = iconst.i32 0
     v647 = icmp eq v645, v646
     v648 = bint.i32 v647
-    brnz v648, ebb82(v548, v549, v551, v552)
-    jump ebb169
+    brnz v648, block82(v548, v549, v551, v552)
+    jump block169

-ebb169:
+block169:
     v649 = iadd.i32 v548, v637
     v650 = iadd.i32 v559, v637
     v651 = iadd.i32 v560, v637
-    jump ebb78(v645, v649, v651, v650)
+    jump block78(v645, v649, v651, v650)

-ebb78(v652: i32, v653: i32, v654: i32, v655: i32):
+block78(v652: i32, v653: i32, v654: i32, v655: i32):
     v656 = iconst.i32 255
     v657 = band v652, v656
     v658 = iconst.i32 0x4ffe
     v659 = icmp uge v653, v658
-    brz v659, ebb79
-    jump ebb170
+    brz v659, block79
+    jump block170

-ebb170:
+block170:
     trap heap_oob

-ebb79:
+block79:
     v660 = uextend.i64 v653
     v661 = iadd_imm.i64 v0, -8
     v662 = load.i64 v661
@@ -1135,23 +1135,23 @@ ebb79:
     v666 = bint.i32 v665
     v667 = copy.i32 v554
     v668 = copy.i32 v562
-    brnz v666, ebb87(v548, v654, v573, v549, v550, v551, v552, v553, v667, v668, v557, v558, v559, v560, v561)
-    jump ebb171
+    brnz v666, block87(v548, v654, v573, v549, v550, v551, v552, v553, v667, v668, v557, v558, v559, v560, v561)
+    jump block171

-ebb171:
+block171:
     v669 = iconst.i32 1
     v670 = iadd.i32 v653, v669
     v671 = iconst.i32 1
     v672 = iadd.i32 v654, v671
     v673 = iconst.i32 0x4ffe
     v674 = icmp.i32 uge v655, v673
-    brz v674, ebb80
-    jump ebb172
+    brz v674, block80
+    jump block172

-ebb172:
+block172:
     trap heap_oob

-ebb80:
+block80:
     v675 = uextend.i64 v655
     v676 = iadd_imm.i64 v0, -8
     v677 = load.i64 v676
@@ -1159,33 +1159,33 @@ ebb80:
     v679 = uload8.i32 v678
     v680 = iconst.i32 1
     v681 = iadd.i32 v655, v680
-    brnz v679, ebb78(v679, v670, v672, v681)
-    jump ebb81
+    brnz v679, block78(v679, v670, v672, v681)
+    jump block81

-ebb81:
-    jump ebb82(v548, v549, v551, v552)
+block81:
+    jump block82(v548, v549, v551, v552)

-ebb82(v682: i32, v683: i32, v684: i32, v685: i32):
+block82(v682: i32, v683: i32, v684: i32, v685: i32):
     v686 = icmp.i32 ule v558, v555
     v687 = bint.i32 v686
-    brnz v687, ebb90(v685, v682)
-    jump ebb173
+    brnz v687, block90(v685, v682)
+    jump block173

-ebb173:
+block173:
     v688 = copy.i32 v561
-    jump ebb83(v688)
+    jump block83(v688)

-ebb83(v689: i32):
+block83(v689: i32):
     v690 = iadd.i32 v557, v689
     v691 = iconst.i32 0x4ffe
     v692 = icmp uge v690, v691
-    brz v692, ebb84
-    jump ebb174
+    brz v692, block84
+    jump block174

-ebb174:
+block174:
     trap heap_oob

-ebb84:
+block84:
     v693 = uextend.i64 v690
     v694 = iadd_imm.i64 v0, -8
     v695 = load.i64 v694
@@ -1194,13 +1194,13 @@ ebb84:
     v698 = iadd.i32 v682, v689
     v699 = iconst.i32 0x4ffe
     v700 = icmp uge v698, v699
-    brz v700, ebb85
-    jump ebb175
+    brz v700, block85
+    jump block175

-ebb175:
+block175:
     trap heap_oob

-ebb85:
+block85:
     v701 = uextend.i64 v698
     v702 = iadd_imm.i64 v0, -8
     v703 = load.i64 v702
@@ -1208,10 +1208,10 @@ ebb85:
     v705 = uload8.i32 v704
     v706 = icmp.i32 ne v697, v705
     v707 = bint.i32 v706
-    brnz v707, ebb86
-    jump ebb176
+    brnz v707, block86
+    jump block176

-ebb176:
+block176:
     v708 = icmp.i32 ule v689, v555
     v709 = bint.i32 v708
     v710 = iconst.i32 -1
@@ -1219,51 +1219,51 @@ ebb176:
     v712 = iconst.i32 0
     v713 = icmp eq v709, v712
     v714 = bint.i32 v713
-    brnz v714, ebb83(v711)
-    jump ebb90(v685, v682)
+    brnz v714, block83(v711)
+    jump block90(v685, v682)

-ebb86:
+block86:
     v715 = copy.i32 v554
     v716 = copy.i32 v562
-    jump ebb88(v682, v554, v573, v683, v550, v684, v685, v553, v715, v562, v716, v557, v558, v559, v560, v561)
+    jump block88(v682, v554, v573, v683, v550, v684, v685, v553, v715, v562, v716, v557, v558, v559, v560, v561)

-ebb87(v717: i32, v718: i32, v719: i32, v720: i32, v721: i32, v722: i32, v723: i32, v724: i32, v725: i32, v726: i32, v727: i32, v728: i32, v729: i32, v730: i32, v731: i32):
+block87(v717: i32, v718: i32, v719: i32, v720: i32, v721: i32, v722: i32, v723: i32, v724: i32, v725: i32, v726: i32, v727: i32, v728: i32, v729: i32, v730: i32, v731: i32):
     v732 = copy v718
     v733 = iconst.i32 0
-    jump ebb88(v717, v732, v719, v720, v721, v722, v723, v724, v725, v733, v726, v727, v728, v729, v730, v731)
+    jump block88(v717, v732, v719, v720, v721, v722, v723, v724, v725, v733, v726, v727, v728, v729, v730, v731)

-ebb88(v734: i32, v735: i32, v736: i32, v737: i32, v738: i32, v739: i32, v740: i32, v741: i32, v742: i32, v743: i32, v744: i32, v745: i32, v746: i32, v747: i32, v748: i32, v749: i32):
+block88(v734: i32, v735: i32, v736: i32, v737: i32, v738: i32, v739: i32, v740: i32, v741: i32, v742: i32, v743: i32, v744: i32, v745: i32, v746: i32, v747: i32, v748: i32, v749: i32):
     v750 = iadd v734, v735
     v751 = copy v742
     v752 = copy v743
     v753 = copy v744
-    jump ebb65(v736, v750, v737, v738, v739, v740, v741, v751, v752, v753, v745, v746, v747, v748, v749)
+    jump block65(v736, v750, v737, v738, v739, v740, v741, v751, v752, v753, v745, v746, v747, v748, v749)

-ebb89(v754: i32):
+block89(v754: i32):
     v755 = iconst.i32 0
-    jump ebb90(v754, v755)
+    jump block90(v754, v755)

-ebb90(v756: i32, v757: i32):
+block90(v756: i32, v757: i32):
     v758 = iconst.i32 0
     v759 = iconst.i32 1056
     v760 = iadd v756, v759
     v761 = iconst.i32 0x4ffe
     v762 = icmp uge v758, v761
-    brz v762, ebb91
-    jump ebb177
+    brz v762, block91
+    jump block177

-ebb177:
+block177:
     trap heap_oob

-ebb91:
+block91:
     v763 = uextend.i64 v758
     v764 = iadd_imm.i64 v0, -8
     v765 = load.i64 v764
     v766 = iadd v765, v763
     store.i32 v760, v766+4
-    jump ebb92(v757)
+    jump block92(v757)

-ebb92(v767: i32):
+block92(v767: i32):
     return v767
 }
@@ -1274,7 +1274,7 @@ function %musl(f64 [%xmm0], i64 vmctx [%rdi]) -> f64 [%xmm0] system_v {
     sig0 = (f64 [%xmm0], i32 [%rdi], i64 vmctx [%rsi]) -> f64 [%xmm0] system_v
     fn0 = u0:517 sig0

-ebb0(v0: f64, v1: i64):
+block0(v0: f64, v1: i64):
     v3 = iconst.i64 0
     v4 = iconst.i32 0
     v131 = iconst.i64 0
@@ -1306,39 +1306,39 @@ ebb0(v0: f64, v1: i64):
     v23 = iconst.i32 0x4086_232b
     v24 = icmp ult v22, v23
     v25 = bint.i32 v24
-    brnz v25, ebb10
-    jump ebb178
+    brnz v25, block10
+    jump block178

-ebb178:
+block178:
     v26 = iconst.i64 0x7fff_ffff_ffff_ffff
     v27 = band v14, v26
     v28 = iconst.i64 0x7ff0_0000_0000_0000
     v29 = icmp ule v27, v28
     v30 = bint.i32 v29
-    brnz v30, ebb9
-    jump ebb2(v12, v0)
+    brnz v30, block9
+    jump block2(v12, v0)

-ebb10:
+block10:
     v31 = iconst.i32 0x3fd6_2e43
     v32 = icmp.i32 ult v22, v31
     v33 = bint.i32 v32
-    brnz v33, ebb8
-    jump ebb179
+    brnz v33, block8
+    jump block179

-ebb179:
+block179:
     v34 = iconst.i32 0x3ff0_a2b2
     v35 = icmp.i32 uge v22, v34
     v36 = bint.i32 v35
-    brnz v36, ebb6
-    jump ebb180
+    brnz v36, block6
+    jump block180

-ebb180:
+block180:
     v37 = iconst.i32 1
     v38 = bxor.i32 v17, v37
     v39 = isub v38, v17
-    jump ebb5(v0, v39)
+    jump block5(v0, v39)

-ebb9:
+block9:
     v138 = iconst.i64 0x4086_2e42_fefa_39ef
     v40 = bitcast.f64 v138
     v41 = fcmp ge v40, v0
@@ -1348,39 +1348,39 @@ ebb9:
     v43 = bor v139, v140
     v44 = bint.i32 v43
     v45 = bor v42, v44
-    brnz v45, ebb7
-    jump ebb181
+    brnz v45, block7
+    jump block181

-ebb181:
+block181:
     v141 = iconst.i64 0x7fe0_0000_0000_0000
     v46 = bitcast.f64 v141
     v47 = fmul.f64 v0, v46
-    jump ebb2(v12, v47)
+    jump block2(v12, v47)

-ebb8:
+block8:
     v48 = iconst.i32 0x3e30_0000
     v49 = icmp.i32 ule v22, v48
     v50 = bint.i32 v49
-    brnz v50, ebb3
-    jump ebb182
+    brnz v50, block3
+    jump block182

-ebb182:
+block182:
     v51 = iconst.i32 0
     v142 = iconst.i64 0
     v52 = bitcast.f64 v142
     v178 = copy.f64 v0
-    jump ebb4(v0, v178, v52, v51)
+    jump block4(v0, v178, v52, v51)

-ebb7:
+block7:
     v143 = iconst.i64 0xc086_232b_dd7a_bcd2
     v53 = bitcast.f64 v143
     v54 = fcmp.f64 ge v0, v53
     v55 = bint.i32 v54
     v56 = bor v55, v44
-    brnz v56, ebb6
-    jump ebb183
+    brnz v56, block6
+    jump block183

-ebb183:
+block183:
     v144 = iconst.i64 0xb6a0_0000_0000_0000
     v57 = bitcast.f64 v144
     v58 = fdiv v57, v0
@@ -1396,10 +1396,10 @@ ebb183:
     v62 = bitcast.f64 v149
     v63 = fcmp gt v62, v0
     v64 = bint.i32 v63
-    brnz v64, ebb2(v12, v61)
-    jump ebb6
+    brnz v64, block2(v12, v61)
+    jump block6

-ebb6:
+block6:
     v150 = iconst.i64 0x3ff7_1547_652b_82fe
     v66 = bitcast.f64 v150
     v67 = fmul.f64 v0, v66
@@ -1416,34 +1416,34 @@ ebb6:
     v76 = x86_cvtt2si.i32 v75
     v158 = iconst.i32 0x8000_0000
     v154 = icmp ne v76, v158
-    brnz v154, ebb11
-    jump ebb184
+    brnz v154, block11
+    jump block184

-ebb184:
+block184:
     v155 = fcmp uno v75, v75
-    brz v155, ebb12
-    jump ebb185
+    brz v155, block12
+    jump block185

-ebb185:
+block185:
     trap bad_toint

-ebb12:
+block12:
     v159 = iconst.i64 0xc1e0_0000_0020_0000
     v156 = bitcast.f64 v159
     v157 = fcmp ge v156, v75
-    brz v157, ebb13
-    jump ebb186
+    brz v157, block13
+    jump block186

-ebb186:
+block186:
     trap int_ovf

-ebb13:
-    jump ebb11
+block13:
+    jump block11

-ebb11:
-    jump ebb5(v0, v76)
+block11:
+    jump block5(v0, v76)

-ebb5(v77: f64, v78: i32):
+block5(v77: f64, v78: i32):
     v79 = fcvt_from_sint.f64 v78
     v160 = iconst.i64 0xbfe6_2e42_fee0_0000
     v80 = bitcast.f64 v160
@@ -1453,14 +1453,14 @@ ebb5(v77: f64, v78: i32):
     v83 = bitcast.f64 v161
     v84 = fmul v79, v83
     v85 = fsub v82, v84
-    jump ebb4(v82, v85, v84, v78)
+    jump block4(v82, v85, v84, v78)

-ebb4(v86: f64, v87: f64, v108: f64, v113: i32):
+block4(v86: f64, v87: f64, v108: f64, v113: i32):
     v88 = fmul v87, v87
     v162 = iconst.i64 0x3e66_3769_72be_a4d0
     v89 = bitcast.f64 v162
     v90 = fmul v88, v89
     v163 = iconst.i64 0xbebb_bd41_c5d2_6bf1
     v91 = bitcast.f64 v163
     v92 = fadd v90, v91
     v93 = fmul v88, v92
@@ -1490,14 +1490,14 @@ ebb4(v86: f64, v87: f64, v108: f64, v113: i32):
     v169 = iconst.i32 0
     v114 = icmp eq v113, v169
     v115 = bint.i32 v114
-    brnz v115, ebb2(v12, v112)
-    jump ebb187
+    brnz v115, block2(v12, v112)
+    jump block187

-ebb187:
+block187:
     v116 = call fn0(v112, v113, v1)
-    jump ebb2(v12, v116)
+    jump block2(v12, v116)

-ebb3:
+block3:
     v170 = iconst.i64 0x7fe0_0000_0000_0000
     v117 = bitcast.f64 v170
     v118 = fadd.f64 v0, v117
@@ -1509,9 +1509,9 @@ ebb3:
     v174 = iconst.i64 0x3ff0_0000_0000_0000
     v120 = bitcast.f64 v174
     v121 = fadd.f64 v0, v120
-    jump ebb2(v12, v121)
+    jump block2(v12, v121)

-ebb2(v123: i32, v130: f64):
+block2(v123: i32, v130: f64):
     v122 = iconst.i32 0
     v127 = iconst.i32 16
     v128 = iadd v123, v127
@@ -1520,8 +1520,8 @@ ebb2(v123: i32, v130: f64):
     v177 = load.i64 v176
     v129 = iadd v177, v175
     store v128, v129+4
-    jump ebb1(v130)
+    jump block1(v130)

-ebb1(v2: f64):
+block1(v2: f64):
     return v2
 }
diff --git a/cranelift/filetests/filetests/regalloc/coalescing-216.clif b/cranelift/filetests/filetests/regalloc/coalescing-216.clif
index b8de70160a..020ced084b 100644
--- a/cranelift/filetests/filetests/regalloc/coalescing-216.clif
+++ b/cranelift/filetests/filetests/regalloc/coalescing-216.clif
@@ -5,83 +5,83 @@ target x86_64 haswell
 ;
 ; The (old) coalescer creates a virtual register with two identical values.
 function %pr216(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v {
-ebb0(v0: i32, v1: i64):
+block0(v0: i32, v1: i64):
     v3 = iconst.i64 0
     v5 = iconst.i32 0
-    brz v5, ebb3(v3)
-    jump ebb4(v3, v3)
+    brz v5, block3(v3)
+    jump block4(v3, v3)

-ebb4(v11: i64, v29: i64):
+block4(v11: i64, v29: i64):
     v6 = iconst.i32 0
-    brz v6, ebb14
-    jump ebb15
+    brz v6, block14
+    jump block15

-ebb15:
+block15:
     v9 = iconst.i32 -17
     v12 = iconst.i32 0xffff_ffff_ffff_8000
-    jump ebb9(v12)
+    jump block9(v12)

-ebb9(v10: i32):
-    brnz v10, ebb8(v9, v11, v11)
-    jump ebb16
+block9(v10: i32):
+    brnz v10, block8(v9, v11, v11)
+    jump block16

-ebb16:
-    brz.i32 v9, ebb13
-    jump ebb17
+block16:
+    brz.i32 v9, block13
+    jump block17

-ebb17:
+block17:
     v13 = iconst.i32 0
-    brnz v13, ebb6(v11, v11)
-    jump ebb18
+    brnz v13, block6(v11, v11)
+    jump block18

-ebb18:
+block18:
     v14 = iconst.i32 0
-    brz v14, ebb12
-    jump ebb11
+    brz v14, block12
+    jump block11

-ebb12:
-    jump ebb4(v11, v11)
+block12:
+    jump block4(v11, v11)

-ebb11:
-    jump ebb10(v11)
+block11:
+    jump block10(v11)

-ebb13:
+block13:
     v15 = iconst.i64 1
-    jump ebb10(v15)
+    jump block10(v15)

-ebb10(v21: i64):
+block10(v21: i64):
     v16 = iconst.i32 0
-    brnz v16, ebb6(v21, v11)
-    jump ebb19
+    brnz v16, block6(v21, v11)
+    jump block19

-ebb19:
+block19:
     v17 = iconst.i32 0xffff_ffff_ffff_9f35
-    jump ebb8(v17, v21, v11)
+    jump block8(v17, v21, v11)

-ebb8(v8: i32, v23: i64, v28: i64):
-    jump ebb7(v8, v23, v28)
+block8(v8: i32, v23: i64, v28: i64):
+    jump block7(v8, v23, v28)

-ebb14:
+block14:
     v18 = iconst.i32 0
-    jump ebb7(v18, v11, v29)
+    jump block7(v18, v11, v29)

-ebb7(v7: i32, v22: i64, v27: i64):
-    jump ebb6(v22, v27)
+block7(v7: i32, v22: i64, v27: i64):
+    jump block6(v22, v27)

-ebb6(v20: i64, v25: i64):
+block6(v20: i64, v25: i64):
     v19 = iconst.i32 0xffc7
-    brnz v19, ebb4(v20, v25)
-    jump ebb5
+    brnz v19, block4(v20, v25)
+    jump block5

-ebb5:
-    jump ebb3(v25)
+block5:
+    jump block3(v25)

-ebb3(v24: i64):
-    jump ebb2(v24)
+block3(v24: i64):
+    jump block2(v24)

-ebb2(v4: i64):
-    jump ebb1(v4)
+block2(v4: i64):
+    jump block1(v4)

-ebb1(v2: i64):
+block1(v2: i64):
     return v2
 }
diff --git a/cranelift/filetests/filetests/regalloc/coloring-227.clif b/cranelift/filetests/filetests/regalloc/coloring-227.clif
index 0418924dfb..52b36d4c70 100644
--- a/cranelift/filetests/filetests/regalloc/coloring-227.clif
+++ b/cranelift/filetests/filetests/regalloc/coloring-227.clif
@@ -5,81 +5,81 @@ function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8])
     gv0 = vmctx
     heap0 = static gv0, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000

-    ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i64):
+    block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i64):
 [RexOp1pu_id#b8] v5 = iconst.i32 0
 [RexOp1pu_id#b8] v6 = iconst.i32 0
-[RexOp1tjccb#74] brz v6, ebb10
-[Op1jmpb#eb] jump ebb3(v5, v5, v5, v5, v5, v5, v0, v1, v2, v3)
+[RexOp1tjccb#74] brz v6, block10
+[Op1jmpb#eb] jump block3(v5, v5, v5, v5, v5, v5, v0, v1, v2, v3)

-    ebb3(v15: i32, v17: i32, v25: i32, v31: i32, v40: i32, v47: i32, v54: i32, v61: i32, v68: i32, v75: i32):
-[Op1jmpb#eb] jump ebb6
+    block3(v15: i32, v17: i32, v25: i32, v31: i32, v40: i32, v47: i32, v54: i32, v61: i32, v68: i32, v75: i32):
+[Op1jmpb#eb] jump block6

-    ebb6:
+    block6:
 [RexOp1pu_id#b8] v8 = iconst.i32 0
-[RexOp1tjccb#75] brnz v8, ebb5
-[Op1jmpb#eb] jump ebb20
+[RexOp1tjccb#75] brnz v8, block5
+[Op1jmpb#eb] jump block20

-    ebb20:
+    block20:
 [RexOp1pu_id#b8] v9 = iconst.i32 0
 [RexOp1pu_id#b8] v11 = iconst.i32 0
 [DynRexOp1icscc#39] v12 = icmp.i32 eq v15, v11
 [RexOp2urm_noflags#4b6] v13 = bint.i32 v12
 [DynRexOp1rr#21] v14 = band v9, v13
-[RexOp1tjccb#75] brnz v14, ebb6
-[Op1jmpb#eb] jump ebb7
+[RexOp1tjccb#75] brnz v14, block6
+[Op1jmpb#eb] jump block7

-    ebb7:
-[RexOp1tjccb#74] brz.i32 v17, ebb8
-[Op1jmpb#eb] jump ebb17
+    block7:
+[RexOp1tjccb#74] brz.i32 v17, block8
+[Op1jmpb#eb] jump block17

-    ebb17:
+    block17:
 [RexOp1pu_id#b8] v18 = iconst.i32 0
-[RexOp1tjccb#74] brz v18, ebb9
-[Op1jmpb#eb] jump ebb16
+[RexOp1tjccb#74] brz v18, block9
+[Op1jmpb#eb] jump block16

-    ebb16:
+    block16:
 [RexOp1pu_id#b8] v21 = iconst.i32 0
 [RexOp1umr#89] v79 = uextend.i64 v5
 [DynRexOp1r_ib#8083] v80 = iadd_imm.i64 v4, 0
 [RexOp1ld#808b] v81 = load.i64 v80
 [DynRexOp1rr#8001] v22 = iadd v81, v79
 [RexMp1st#189] istore16 v21, v22
-[Op1jmpb#eb] jump ebb9
+[Op1jmpb#eb] jump block9

-    ebb9:
-[Op1jmpb#eb] jump ebb8
+    block9:
+[Op1jmpb#eb] jump block8

-    ebb8:
+    block8:
 [RexOp1pu_id#b8] v27 = iconst.i32 3
 [RexOp1pu_id#b8] v28 = iconst.i32 4
 [DynRexOp1rr#09] v35 = bor.i32 v31, v13
-[RexOp1tjccb#75] brnz v35, ebb15(v27)
-[Op1jmpb#eb] jump ebb15(v28)
+[RexOp1tjccb#75] brnz v35, block15(v27)
+[Op1jmpb#eb] jump block15(v28)

-    ebb15(v36: i32):
-[Op1jmpb#eb] jump ebb3(v25, v36, v25, v31, v40, v47, v54, v61, v68, v75)
+    block15(v36: i32):
+[Op1jmpb#eb] jump block3(v25, v36, v25, v31, v40, v47, v54, v61, v68, v75)

-    ebb5:
-[Op1jmpb#eb] jump ebb4
+    block5:
+[Op1jmpb#eb] jump block4

-    ebb4:
-[Op1jmpb#eb] jump ebb2(v40, v47, v54, v61, v68, v75)
+    block4:
+[Op1jmpb#eb] jump block2(v40, v47, v54, v61, v68, v75)

-    ebb10:
+    block10:
 [RexOp1pu_id#b8] v43 = iconst.i32 0
-[Op1jmpb#eb] jump ebb2(v43, v5, v0, v1, v2, v3)
+[Op1jmpb#eb] jump block2(v43, v5, v0, v1, v2, v3)

-    ebb2(v7: i32, v45: i32, v52: i32, v59: i32, v66: i32, v73: i32):
+    block2(v7: i32, v45: i32, v52: i32, v59: i32, v66: i32, v73: i32):
[RexOp1pu_id#b8] v44 = iconst.i32 0 -[RexOp1tjccb#74] brz v44, ebb12 -[Op1jmpb#eb] jump ebb18 +[RexOp1tjccb#74] brz v44, block12 +[Op1jmpb#eb] jump block18 - ebb18: + block18: [RexOp1pu_id#b8] v50 = iconst.i32 11 -[RexOp1tjccb#74] brz v50, ebb14 -[Op1jmpb#eb] jump ebb19 +[RexOp1tjccb#74] brz v50, block14 +[Op1jmpb#eb] jump block19 - ebb19: + block19: [RexOp1umr#89] v82 = uextend.i64 v52 [DynRexOp1r_ib#8083] v83 = iadd_imm.i64 v4, 0 [RexOp1ld#808b] v84 = load.i64 v83 @@ -91,25 +91,25 @@ function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8]) [DynRexOp1rr#8001] v64 = iadd v87, v85 [RexOp1st#88] istore8 v59, v64 [RexOp1pu_id#b8] v65 = iconst.i32 0 -[Op1jmpb#eb] jump ebb13(v65) +[Op1jmpb#eb] jump block13(v65) - ebb14: -[Op1jmpb#eb] jump ebb13(v66) + block14: +[Op1jmpb#eb] jump block13(v66) - ebb13(v51: i32): + block13(v51: i32): [RexOp1umr#89] v88 = uextend.i64 v45 [DynRexOp1r_ib#8083] v89 = iadd_imm.i64 v4, 0 [RexOp1ld#808b] v90 = load.i64 v89 [DynRexOp1rr#8001] v71 = iadd v90, v88 [RexOp1st#89] store v51, v71 -[Op1jmpb#eb] jump ebb12 +[Op1jmpb#eb] jump block12 - ebb12: -[Op1jmpb#eb] jump ebb11 + block12: +[Op1jmpb#eb] jump block11 - ebb11: -[Op1jmpb#eb] jump ebb1 + block11: +[Op1jmpb#eb] jump block1 - ebb1: + block1: [Op1ret#c3] return } diff --git a/cranelift/filetests/filetests/regalloc/constraints.clif b/cranelift/filetests/filetests/regalloc/constraints.clif index a0478baae9..60cd731ed8 100644 --- a/cranelift/filetests/filetests/regalloc/constraints.clif +++ b/cranelift/filetests/filetests/regalloc/constraints.clif @@ -6,7 +6,7 @@ target i686 ; Tied operands, both are killed at instruction. function %tied_easy() -> i32 { -ebb0: +block0: v0 = iconst.i32 12 v1 = iconst.i32 13 ; not: copy @@ -17,7 +17,7 @@ ebb0: ; Tied operand is live after instruction. function %tied_alive() -> i32 { -ebb0: +block0: v0 = iconst.i32 12 v1 = iconst.i32 13 ; check: $(v0c=$V) = copy v0 @@ -30,7 +30,7 @@ ebb0: ; Fixed register constraint. function %fixed_op() -> i32 { -ebb0: +block0: ; check: ,%rax] ; sameln: v0 = iconst.i32 12 v0 = iconst.i32 12 @@ -43,7 +43,7 @@ ebb0: ; Fixed register constraint twice. function %fixed_op_twice() -> i32 { -ebb0: +block0: ; check: ,%rax] ; sameln: v0 = iconst.i32 12 v0 = iconst.i32 12 @@ -60,7 +60,7 @@ ebb0: ; Tied use of a diverted register. function %fixed_op_twice() -> i32 { -ebb0: +block0: ; check: ,%rax] ; sameln: v0 = iconst.i32 12 v0 = iconst.i32 12 diff --git a/cranelift/filetests/filetests/regalloc/fallthrough-return.clif b/cranelift/filetests/filetests/regalloc/fallthrough-return.clif index 557710eb5a..58ec61f0d8 100644 --- a/cranelift/filetests/filetests/regalloc/fallthrough-return.clif +++ b/cranelift/filetests/filetests/regalloc/fallthrough-return.clif @@ -6,7 +6,7 @@ target x86_64 function %foo() -> f64 { fn0 = %bar() -ebb0: +block0: v0 = f64const 0.0 call fn0() fallthrough_return v0 @@ -16,7 +16,7 @@ ebb0: function %foo() -> f64 { fn0 = %bar() -> f64, f64 -ebb0: +block0: v0, v1 = call fn0() fallthrough_return v1 } diff --git a/cranelift/filetests/filetests/regalloc/ghost-param.clif b/cranelift/filetests/filetests/regalloc/ghost-param.clif index f2a1883a0d..d51f4a7f72 100644 --- a/cranelift/filetests/filetests/regalloc/ghost-param.clif +++ b/cranelift/filetests/filetests/regalloc/ghost-param.clif @@ -1,45 +1,45 @@ test regalloc target x86_64 haswell -; This test case would create an EBB parameter that was a ghost value. +; This test case would create a block parameter that was a ghost value. 
; The coalescer would insert a copy of the ghost value, leading to verifier errors. ; -; We don't allow EBB parameters to be ghost values any longer. +; We don't allow block parameters to be ghost values any longer. ; ; Test case by binaryen fuzzer! function %pr215(i64 vmctx [%rdi]) system_v { -ebb0(v0: i64): +block0(v0: i64): v10 = iconst.i64 0 v1 = bitcast.f64 v10 - jump ebb5(v1) + jump block5(v1) -ebb5(v9: f64): +block5(v9: f64): v11 = iconst.i64 0xffff_ffff_ff9a_421a v4 = bitcast.f64 v11 v6 = iconst.i32 0 v7 = iconst.i32 1 - brnz v7, ebb4(v6) - jump ebb8 + brnz v7, block4(v6) + jump block8 -ebb8: +block8: v8 = iconst.i32 0 - jump ebb7(v8) + jump block7(v8) -ebb7(v5: i32): - brnz v5, ebb3(v4) - jump ebb5(v4) +block7(v5: i32): + brnz v5, block3(v4) + jump block5(v4) -ebb4(v3: i32): - brnz v3, ebb2 - jump ebb3(v9) +block4(v3: i32): + brnz v3, block2 + jump block3(v9) -ebb3(v2: f64): - jump ebb2 +block3(v2: f64): + jump block2 -ebb2: - jump ebb1 +block2: + jump block1 -ebb1: +block1: return } diff --git a/cranelift/filetests/filetests/regalloc/global-constraints.clif b/cranelift/filetests/filetests/regalloc/global-constraints.clif index 11a3dbef2c..8149b9bae6 100644 --- a/cranelift/filetests/filetests/regalloc/global-constraints.clif +++ b/cranelift/filetests/filetests/regalloc/global-constraints.clif @@ -7,19 +7,19 @@ target i686 ; The icmp_imm instructions write their b1 result to the ABCD register class on ; 32-bit x86. So if we define 5 live values, they can't all fit. function %global_constraints(i32) { -ebb0(v0: i32): +block0(v0: i32): v1 = icmp_imm eq v0, 1 v2 = icmp_imm ugt v0, 2 v3 = icmp_imm sle v0, 3 v4 = icmp_imm ne v0, 4 v5 = icmp_imm sge v0, 5 - brnz v5, ebb1 - jump ebb2 + brnz v5, block1 + jump block2 -ebb2: +block2: return -ebb1: +block1: ; Make sure v1-v5 are live in.
v10 = band v1, v2 v11 = bor v3, v4 diff --git a/cranelift/filetests/filetests/regalloc/global-fixed.clif b/cranelift/filetests/filetests/regalloc/global-fixed.clif index eb8e23d7af..851f012492 100644 --- a/cranelift/filetests/filetests/regalloc/global-fixed.clif +++ b/cranelift/filetests/filetests/regalloc/global-fixed.clif @@ -2,15 +2,15 @@ test regalloc target x86_64 haswell function %foo() system_v { -ebb4: +block4: v3 = iconst.i32 0 - jump ebb3 + jump block3 -ebb3: +block3: v9 = udiv v3, v3 - jump ebb1 + jump block1 -ebb1: +block1: v19 = iadd.i32 v9, v9 - jump ebb3 + jump block3 } diff --git a/cranelift/filetests/filetests/regalloc/gpr-deref-safe-335.clif b/cranelift/filetests/filetests/regalloc/gpr-deref-safe-335.clif index 8e8b2260cf..04e9cc54fb 100644 --- a/cranelift/filetests/filetests/regalloc/gpr-deref-safe-335.clif +++ b/cranelift/filetests/filetests/regalloc/gpr-deref-safe-335.clif @@ -2,43 +2,43 @@ test regalloc target x86_64 function u0:587() fast { -ebb0: +block0: v97 = iconst.i32 0 v169 = iconst.i32 0 v1729 = iconst.i32 0 - jump ebb100(v97, v97, v97, v97, v97) + jump block100(v97, v97, v97, v97, v97) -ebb100(v1758: i32, v1784: i32, v1845: i32, v1856: i32, v1870: i32): +block100(v1758: i32, v1784: i32, v1845: i32, v1856: i32, v1870: i32): v1762 = iconst.i32 0 v1769 = iconst.i32 0 v1774 = iconst.i32 0 v1864 = iconst.i32 0 v1897 = iconst.i32 0 - jump ebb102(v1774, v1784, v1845, v1856, v1870, v1758, v1762, v169, v1729, v97, v169, v169, v169, v169) + jump block102(v1774, v1784, v1845, v1856, v1870, v1758, v1762, v169, v1729, v97, v169, v169, v169, v169) -ebb102(v1785: i32, v1789: i32, v1843: i32, v1854: i32, v1868: i32, v1882: i32, v1890: i32, v1901: i32, v1921: i32, v1933: i32, v2058: i32, v2124: i32, v2236: i32, v2366: i32): +block102(v1785: i32, v1789: i32, v1843: i32, v1854: i32, v1868: i32, v1882: i32, v1890: i32, v1901: i32, v1921: i32, v1933: i32, v2058: i32, v2124: i32, v2236: i32, v2366: i32): v1929 = iconst.i32 0 v1943 = iconst.i32 0 v1949 = iconst.i32 0 - jump ebb123(v1897, v1769) + jump block123(v1897, v1769) -ebb123(v1950: i32, v1979: i32): +block123(v1950: i32, v1979: i32): v1955 = iconst.i32 0 - brz v1955, ebb125 - jump ebb122(v1929, v1843, v1864, v2058, v1882, v1897, v1943, v1868, v2124, v1901) + brz v1955, block125 + jump block122(v1929, v1843, v1864, v2058, v1882, v1897, v1943, v1868, v2124, v1901) -ebb125: +block125: v1961 = iadd_imm.i32 v1949, 0 v1952 = iconst.i32 0 v1962 = iconst.i64 0 v1963 = load.i32 v1962 - brz v1963, ebb123(v1952, v1961) - jump ebb127 + brz v1963, block123(v1952, v1961) + jump block127 -ebb127: +block127: v1966 = iconst.i32 0 - jump ebb122(v1963, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966) + jump block122(v1963, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966) -ebb122(v1967: i32, v1971: i32, v1972: i32, v1978: i32, v2032: i32, v2041: i32, v2053: i32, v2076: i32, v2085: i32, v2096: i32): +block122(v1967: i32, v1971: i32, v1972: i32, v1978: i32, v2032: i32, v2041: i32, v2053: i32, v2076: i32, v2085: i32, v2096: i32): trap user0 } diff --git a/cranelift/filetests/filetests/regalloc/infinite-interference.clif b/cranelift/filetests/filetests/regalloc/infinite-interference.clif index bce607b7f6..b7a7736405 100644 --- a/cranelift/filetests/filetests/regalloc/infinite-interference.clif +++ b/cranelift/filetests/filetests/regalloc/infinite-interference.clif @@ -7,31 +7,31 @@ target riscv32 ; resolve that conflict since v1 will just interfere with the inserted copy too. 
;function %c1(i32) -> i32 { -;ebb0(v0: i32): +;block0(v0: i32): ; v1 = iadd_imm v0, 1 ; v2 = iconst.i32 1 -; brz v1, ebb1(v2) -; jump ebb2 +; brz v1, block1(v2) +; jump block2 ; -;ebb1(v3: i32): +;block1(v3: i32): ; return v3 ; -;ebb2: -; jump ebb1(v1) +;block2: +; jump block1(v1) ;} ; Same thing with v1 and v2 swapped to reverse the order of definitions. function %c2(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = iadd_imm v0, 1 v2 = iconst.i32 1 - brz v2, ebb1(v1) - jump ebb2 + brz v2, block1(v1) + jump block2 -ebb1(v3: i32): +block1(v3: i32): return v3 -ebb2: - jump ebb1(v2) +block2: + jump block1(v2) } diff --git a/cranelift/filetests/filetests/regalloc/iterate.clif b/cranelift/filetests/filetests/regalloc/iterate.clif index 347dbc5f29..2c7d691765 100644 --- a/cranelift/filetests/filetests/regalloc/iterate.clif +++ b/cranelift/filetests/filetests/regalloc/iterate.clif @@ -2,7 +2,7 @@ test regalloc target x86_64 haswell function u0:9(i64 [%rdi], f32 [%xmm0], f64 [%xmm1], i32 [%rsi], i32 [%rdx], i64 vmctx [%r14]) -> i64 [%rax] baldrdash_system_v { -ebb0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64): +block0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64): v32 = iconst.i32 0 v6 = bitcast.f32 v32 v7 = iconst.i64 0 @@ -19,23 +19,23 @@ ebb0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64): v14 = bitcast.f64 v36 v44 = iconst.i64 0 v37 = icmp slt v0, v44 - brnz v37, ebb2 - jump ebb11 + brnz v37, block2 + jump block11 -ebb11: +block11: v38 = fcvt_from_sint.f64 v0 - jump ebb3(v38) + jump block3(v38) -ebb2: +block2: v45 = iconst.i32 1 v39 = ushr.i64 v0, v45 v40 = band_imm.i64 v0, 1 v41 = bor v39, v40 v42 = fcvt_from_sint.f64 v41 v43 = fadd v42, v42 - jump ebb3(v43) + jump block3(v43) -ebb3(v15: f64): +block3(v15: f64): v16 = fpromote.f64 v9 v46 = uextend.i64 v10 v17 = fcvt_from_sint.f64 v46 @@ -43,42 +43,42 @@ ebb3(v15: f64): v19 = fpromote.f64 v12 v54 = iconst.i64 0 v47 = icmp.i64 slt v13, v54 - brnz v47, ebb4 - jump ebb12 + brnz v47, block4 + jump block12 -ebb12: +block12: v48 = fcvt_from_sint.f64 v13 - jump ebb5(v48) + jump block5(v48) -ebb4: +block4: v55 = iconst.i32 1 v49 = ushr.i64 v13, v55 v50 = band_imm.i64 v13, 1 v51 = bor v49, v50 v52 = fcvt_from_sint.f64 v51 v53 = fadd v52, v52 - jump ebb5(v53) + jump block5(v53) -ebb5(v20: f64): +block5(v20: f64): v63 = iconst.i64 0 v56 = icmp.i64 slt v7, v63 - brnz v56, ebb6 - jump ebb13 + brnz v56, block6 + jump block13 -ebb13: +block13: v57 = fcvt_from_sint.f64 v7 - jump ebb7(v57) + jump block7(v57) -ebb6: +block6: v64 = iconst.i32 1 v58 = ushr.i64 v7, v64 v59 = band_imm.i64 v7, 1 v60 = bor v58, v59 v61 = fcvt_from_sint.f64 v60 v62 = fadd v61, v61 - jump ebb7(v62) + jump block7(v62) -ebb7(v21: f64): +block7(v21: f64): v22 = fadd v21, v14 v23 = fadd.f64 v20, v22 v24 = fadd.f64 v19, v23 @@ -90,34 +90,34 @@ ebb7(v21: f64): v30 = x86_cvtt2si.i64 v29 v69 = iconst.i64 0x8000_0000_0000_0000 v65 = icmp ne v30, v69 - brnz v65, ebb8 - jump ebb15 + brnz v65, block8 + jump block15 -ebb15: +block15: v66 = fcmp uno v29, v29 - brz v66, ebb9 - jump ebb16 + brz v66, block9 + jump block16 -ebb16: +block16: trap bad_toint -ebb9: +block9: v70 = iconst.i64 0xc3e0_0000_0000_0000 v67 = bitcast.f64 v70 v68 = fcmp gt v67, v29 - brz v68, ebb10 - jump ebb17 + brz v68, block10 + jump block17 -ebb17: +block17: trap int_ovf -ebb10: - jump ebb8 +block10: + jump block8 -ebb8: - jump ebb1(v30) +block8: + jump block1(v30) -ebb1(v31: i64): +block1(v31: i64): return v31 } @@ -126,7 +126,7 @@ function u0:26(i64 vmctx [%r14]) -> i64 [%rax] baldrdash_system_v { 
gv0 = iadd_imm.i64 gv1, 48 sig0 = (i32 [%rdi], i64 [%rsi], i64 vmctx [%r14], i64 sigid [%rbx]) -> i64 [%rax] baldrdash_system_v -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i32 32 v2 = iconst.i64 64 v3 = iconst.i32 9 @@ -135,30 +135,30 @@ ebb0(v0: i64): v6 = load.i32 v5 v7 = icmp uge v3, v6 ; If we're unlucky, there are no ABCD registers available for v7 at this branch. - brz v7, ebb2 - jump ebb4 + brz v7, block2 + jump block4 -ebb4: +block4: trap oob -ebb2: +block2: v8 = load.i64 v5+8 v9 = uextend.i64 v3 v16 = iconst.i64 16 v10 = imul v9, v16 v11 = iadd v8, v10 v12 = load.i64 v11 - brnz v12, ebb3 - jump ebb5 + brnz v12, block3 + jump block5 -ebb5: +block5: trap icall_null -ebb3: +block3: v13 = load.i64 v11+8 v14 = call_indirect.i64 sig0, v12(v1, v2, v13, v4) - jump ebb1(v14) + jump block1(v14) -ebb1(v15: i64): +block1(v15: i64): return v15 } diff --git a/cranelift/filetests/filetests/regalloc/multi-constraints.clif b/cranelift/filetests/filetests/regalloc/multi-constraints.clif index d0b6f7faf0..b01be532f8 100644 --- a/cranelift/filetests/filetests/regalloc/multi-constraints.clif +++ b/cranelift/filetests/filetests/regalloc/multi-constraints.clif @@ -14,16 +14,16 @@ target x86_64 haswell ; ; - The same value used for a tied operand and a fixed operand. ; - The common value is already in %rcx. -; - The tied output value is live outside the EBB. +; - The tied output value is live outside the block. ; ; Under these conditions, Solver::add_tied_input() would create a variable for the tied input ; without considering the fixed constraint. function %pr221(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] { -ebb0(v0: i64, v1: i64, v2: i64, v3: i64): +block0(v0: i64, v1: i64, v2: i64, v3: i64): v4 = ushr v3, v3 - jump ebb1 + jump block1 -ebb1: +block1: return v4 } @@ -37,13 +37,13 @@ ebb1: ; Since the ushr x, x result is forced to be placed in %rcx, we must set the replace_global_defines ; flag so it can be reassigned to a different global register. function %pr218(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] { -ebb0(v0: i64, v1: i64, v2: i64, v3: i64): +block0(v0: i64, v1: i64, v2: i64, v3: i64): ; check: regmove v3, %rcx -> v4 = ushr v0, v0 ; check: v4 = copy - jump ebb1 + jump block1 -ebb1: +block1: ; v3 is globally live in %rcx. ; v4 is also globally live. Needs to be assigned something else for the trip across the CFG edge. v5 = iadd v3, v4 diff --git a/cranelift/filetests/filetests/regalloc/multiple-returns.clif b/cranelift/filetests/filetests/regalloc/multiple-returns.clif index 6514e3e030..3481747a60 100644 --- a/cranelift/filetests/filetests/regalloc/multiple-returns.clif +++ b/cranelift/filetests/filetests/regalloc/multiple-returns.clif @@ -4,7 +4,7 @@ target x86_64 ; Return the same value twice. This needs a copy so that each value can be ; allocated its own register. function %multiple_returns() -> i64, i64 { -ebb0: +block0: v2 = iconst.i64 0 return v2, v2 } @@ -14,7 +14,7 @@ ebb0: ; Same thing, now with a fallthrough_return. 
function %multiple_returns() -> i64, i64 { -ebb0: +block0: v2 = iconst.i64 0 fallthrough_return v2, v2 } diff --git a/cranelift/filetests/filetests/regalloc/output-interference.clif b/cranelift/filetests/filetests/regalloc/output-interference.clif index ab027a72fb..513c81f4e5 100644 --- a/cranelift/filetests/filetests/regalloc/output-interference.clif +++ b/cranelift/filetests/filetests/regalloc/output-interference.clif @@ -2,7 +2,7 @@ test regalloc target x86_64 haswell function %test(i64) -> i64 system_v { -ebb0(v0: i64): +block0(v0: i64): v2 = iconst.i64 12 ; This division clobbers two of its fixed input registers on x86. ; These are FixedTied constraints that the spiller needs to resolve. diff --git a/cranelift/filetests/filetests/regalloc/reload-208.clif b/cranelift/filetests/filetests/regalloc/reload-208.clif index 23783def04..6a723f02f5 100644 --- a/cranelift/filetests/filetests/regalloc/reload-208.clif +++ b/cranelift/filetests/filetests/regalloc/reload-208.clif @@ -2,14 +2,14 @@ test regalloc target x86_64 haswell ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ ; Filed as https://github.com/bytecodealliance/cranelift/issues/208 ; ; The verifier complains about a branch argument that is not in the same virtual register as the -; corresponding EBB argument. +; corresponding block argument. ; -; The problem was the reload pass rewriting EBB arguments on "brnz v9, ebb3(v9)" +; The problem was the reload pass rewriting block arguments on "brnz v9, block3(v9)" function %pr208(i64 vmctx [%rdi]) system_v { gv1 = vmctx @@ -20,18 +20,18 @@ function %pr208(i64 vmctx [%rdi]) system_v { fn0 = u0:1 sig0 fn1 = u0:3 sig1 -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i32 0 v2 = call fn0(v0) v20 = iconst.i32 0x4ffe v16 = icmp uge v2, v20 - brz v16, ebb5 - jump ebb9 + brz v16, block5 + jump block9 -ebb9: +block9: trap heap_oob -ebb5: +block5: v17 = uextend.i64 v2 v18 = iadd_imm.i64 v0, -8 v19 = load.i64 v18 @@ -40,25 +40,25 @@ ebb5: v21 = iconst.i32 0 v5 = icmp eq v4, v21 v6 = bint.i32 v5 - brnz v6, ebb2 - jump ebb3(v4) + brnz v6, block2 + jump block3(v4) - ; check: ebb5: - ; check: jump ebb3(v4) - ; check: $(splitEdge=$EBB): - ; nextln: jump ebb3(v9) + ; check: block5: + ; check: jump block3(v4) + ; check: $(splitEdge=$BB): + ; nextln: jump block3(v9) -ebb3(v7: i32): +block3(v7: i32): call fn1(v0, v7) v26 = iconst.i32 0x4ffe v22 = icmp uge v7, v26 - brz v22, ebb6 - jump ebb10 + brz v22, block6 + jump block10 -ebb10: +block10: trap heap_oob -ebb6: +block6: v23 = uextend.i64 v7 v24 = iadd_imm.i64 v0, -8 v25 = load.i64 v24 @@ -66,23 +66,23 @@ ebb6: v9 = load.i32 v8+56 ; check: v9 = spill ; check: brnz $V, $splitEdge - brnz v9, ebb3(v9) - jump ebb4 + brnz v9, block3(v9) + jump block4 -ebb4: - jump ebb2 +block4: + jump block2 -ebb2: +block2: v10 = iconst.i32 0 v31 = iconst.i32 0x4ffe v27 = icmp uge v10, v31 - brz v27, ebb7 - jump ebb11 + brz v27, block7 + jump block11 -ebb11: +block11: trap heap_oob -ebb7: +block7: v28 = uextend.i64 v10 v29 = iadd_imm.i64 v0, -8 v30 = load.i64 v29 @@ -92,21 +92,21 @@ ebb7: v13 = iconst.i32 0 v36 = iconst.i32 0x4ffe v32 = icmp uge v13, v36 - brz v32, ebb8 - jump ebb12 + brz v32, block8 + jump block12 -ebb12: +block12: trap heap_oob -ebb8: +block8: v33 = uextend.i64 v13 v34 = iadd_imm.i64 v0, -8 v35 = load.i64 v34 v14 = iadd v35, v33 v15 = load.i32 v14+12 call fn1(v0, v15) - jump ebb1 + jump block1 -ebb1: +block1: return } diff --git a/cranelift/filetests/filetests/regalloc/reload-779.clif b/cranelift/filetests/filetests/regalloc/reload-779.clif index 
f4e8e3ecff..ed6374c9fb 100644 --- a/cranelift/filetests/filetests/regalloc/reload-779.clif +++ b/cranelift/filetests/filetests/regalloc/reload-779.clif @@ -12,12 +12,12 @@ function u0:0(i64, i64, i64) system_v { fn1 = u0:94 sig0 fn2 = u0:95 sig1 -ebb0(v0: i64, v1: i64, v2: i64): +block0(v0: i64, v1: i64, v2: i64): v3 = iconst.i16 0 - jump ebb1(v3) + jump block1(v3) -ebb1(v4: i16): +block1(v4: i16): call fn1() call fn2(v4) - jump ebb1(v4) + jump block1(v4) } diff --git a/cranelift/filetests/filetests/regalloc/reload.clif b/cranelift/filetests/filetests/regalloc/reload.clif index 0d39047dfd..88b20c1501 100644 --- a/cranelift/filetests/filetests/regalloc/reload.clif +++ b/cranelift/filetests/filetests/regalloc/reload.clif @@ -7,7 +7,7 @@ target riscv32 enable_e function %spill_return() -> i32 { fn0 = %foo() -> i32 system_v -ebb0: +block0: v0 = call fn0() ; check: $(reg=$V) = call fn0 ; check: v0 = spill $reg @@ -24,7 +24,7 @@ ebb0: ; on the stack. function %spilled_copy_arg(i32, i32, i32, i32, i32, i32, i32) -> i32 { -ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): +block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): ; not: copy ; check: v10 = fill v6 v10 = copy v6 @@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32): function %spilled_copy_result(i32) -> i32 { fn0 = %foo(i32) -ebb0(v0: i32): +block0(v0: i32): ; not: copy ; check: v1 = spill v0 v1 = copy v0 diff --git a/cranelift/filetests/filetests/regalloc/schedule-moves.clif b/cranelift/filetests/filetests/regalloc/schedule-moves.clif index afd652ece9..f46d8958f7 100644 --- a/cranelift/filetests/filetests/regalloc/schedule-moves.clif +++ b/cranelift/filetests/filetests/regalloc/schedule-moves.clif @@ -2,7 +2,7 @@ test regalloc target i686 haswell function %pr165() system_v { -ebb0: +block0: v0 = iconst.i32 0x0102_0304 v1 = iconst.i32 0x1102_0304 v2 = iconst.i32 0x2102_0304 @@ -20,7 +20,7 @@ ebb0: ; Same as above, but use so many registers that spilling is required. ; Note: This is also a candidate for using xchg instructions. 
function %emergency_spill() system_v { -ebb0: +block0: v0 = iconst.i32 0x0102_0304 v1 = iconst.i32 0x1102_0304 v2 = iconst.i32 0x2102_0304 diff --git a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-2.clif b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-2.clif index be64db792d..9737d4e163 100644 --- a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-2.clif +++ b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-2.clif @@ -4,16 +4,16 @@ set enable_pinned_reg=true target x86_64 haswell function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v { -ebb0(v0: i32, v1: i32, v2: i32, v3: i64): +block0(v0: i32, v1: i32, v2: i32, v3: i64): v236 = iconst.i32 0x4de9_bd37 v424 = iconst.i32 0 - jump ebb37(v424) + jump block37(v424) -ebb37(v65: i32): +block37(v65: i32): v433 = iconst.i32 0 - jump ebb40(v433) + jump block40(v433) -ebb40(v70: i32): +block40(v70: i32): v75 = iconst.i32 0 v259 = iconst.i32 0 v78 -> v259 @@ -28,17 +28,17 @@ ebb40(v70: i32): v272 = iconst.i32 0x4de9_bd37 v490, v273 = x86_smulx v100, v272 v493 = iconst.i32 0 - jump ebb61(v493) + jump block61(v493) -ebb61(v103: i32): +block61(v103: i32): v104 = iconst.i32 -23 v105 = iconst.i32 -23 v106 = popcnt v105 v500 = sshr_imm v104, 31 v501 = iconst.i32 0 - jump ebb64(v501) + jump block64(v501) -ebb64(v107: i32): +block64(v107: i32): v108 = iconst.i32 0 v109 = iconst.i32 0 v278 = iconst.i32 0 @@ -49,9 +49,9 @@ ebb64(v107: i32): v283 = iadd v281, v282 v111 -> v283 v112 = rotr v108, v283 - jump ebb65 + jump block65 -ebb65: +block65: v509 = iconst.i32 0 v510, v511 = x86_sdivmodx v107, v509, v112 v113 -> v510 diff --git a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-3.clif b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-3.clif index 771957c44e..8a9a040eb1 100644 --- a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-3.clif +++ b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var-3.clif @@ -4,16 +4,16 @@ set enable_pinned_reg=true target x86_64 haswell function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v { -ebb0(v0: i32, v1: i32, v2: i32, v3: i64): +block0(v0: i32, v1: i32, v2: i32, v3: i64): v5 = iconst.i32 -8 v114 = iconst.i32 0 v16 = iconst.i32 -8 v17 = popcnt v16 v192 = ifcmp_imm v17, -1 trapif ne v192, user0 - jump ebb12 + jump block12 -ebb12: +block12: v122 = iconst.i32 0 v123 = ushr_imm v122, 31 v124 = iadd v122, v123 @@ -23,51 +23,51 @@ ebb12: v31 -> v204 v210 = ifcmp_imm v31, -1 trapif ne v210, user0 - jump ebb18 + jump block18 -ebb18: +block18: v215 = iconst.i32 0 - jump ebb19(v215) + jump block19(v215) -ebb19(v32: i32): +block19(v32: i32): v35 = iconst.i32 0 v218 = ifcmp_imm v35, -1 trapif ne v218, user0 - jump ebb21 + jump block21 -ebb21: +block21: v223 = iconst.i32 0 - jump ebb22(v223) + jump block22(v223) -ebb22(v36: i32): +block22(v36: i32): v136 = iconst.i32 0 v40 -> v136 v227 = ifcmp_imm v136, -1 trapif ne v227, user0 - jump ebb24 + jump block24 -ebb24: +block24: v232 = iconst.i32 0 - jump ebb25(v232) + jump block25(v232) -ebb25(v41: i32): +block25(v41: i32): v142 = iconst.i32 0 v45 -> v142 v236 = ifcmp_imm v142, -1 trapif ne v236, user0 - jump ebb27 + jump block27 -ebb27: +block27: v241 = iconst.i32 0 - jump ebb28(v241) + jump block28(v241) -ebb28(v46: i32): +block28(v46: i32): v49 = iconst.i32 0 v244 = ifcmp_imm v49, -1 trapif ne v244, user0 - jump ebb30 + jump block30 -ebb30: +block30: v254 = iconst.i32 0 v53 -> v254 v54 = iconst.i32 -23 @@ -80,9 +80,9 @@ ebb30: v148 = iadd v146, v147 v57 
-> v148 v58 = ishl v53, v148 - jump ebb35 + jump block35 -ebb35: +block35: v262 = iconst.i32 0 v263, v264 = x86_sdivmodx v46, v262, v58 v59 -> v263 @@ -93,9 +93,9 @@ ebb35: v280 = iconst.i32 0 v281 = ffcmp v61, v61 trapff ord v281, user0 - jump ebb41(v280) + jump block41(v280) -ebb41(v62: i32): +block41(v62: i32): v157 = iconst.i32 0 v158 = sshr_imm v157, 4 v159 = iconst.i32 0 @@ -103,9 +103,9 @@ ebb41(v62: i32): v75 -> v160 v308 = ifcmp_imm v160, -1 trapif ne v308, user0 - jump ebb52 + jump block52 -ebb52: +block52: v87 = iconst.i32 -23 v88 = iconst.i32 -23 v89 = popcnt v88 diff --git a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var.clif b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var.clif index 7182bea43a..475bfa0f47 100644 --- a/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var.clif +++ b/cranelift/filetests/filetests/regalloc/solver-fixedconflict-var.clif @@ -6,7 +6,7 @@ target x86_64 haswell ;; Test for the issue #1123; https://github.com/bytecodealliance/cranelift/issues/1123 function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v { -ebb0(v0: i32, v1: i32, v2: i32, v3: i64): +block0(v0: i32, v1: i32, v2: i32, v3: i64): v351 = iconst.i32 0x4de9_bd37 v31 = iconst.i32 -23 v35 = iconst.i32 0 @@ -24,68 +24,68 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i64): v53 = iconst.i32 0 v547 = ifcmp_imm v53, -1 trapif ne v547, user0 - jump ebb30 + jump block30 -ebb30: +block30: v75 = iconst.i32 0 v581 = ifcmp_imm v75, -1 trapif ne v581, user0 - jump ebb42 + jump block42 -ebb42: +block42: v136 = iconst.i32 0 v691 = ifcmp_imm v136, -1 trapif ne v691, user0 - jump ebb81 + jump block81 -ebb81: +block81: v158 = iconst.i32 0 v725 = ifcmp_imm v158, -1 trapif ne v725, user0 - jump ebb93 + jump block93 -ebb93: +block93: v760 = iconst.i32 0 - jump ebb106(v760) + jump block106(v760) -ebb106(v175: i32): +block106(v175: i32): v179 = iconst.i32 0 v180 = icmp_imm eq v179, 0 v183 = iconst.i32 0 v766 = ifcmp_imm v183, -1 trapif ne v766, user0 - jump ebb108 + jump block108 -ebb108: +block108: v771 = iconst.i32 0 - jump ebb109(v771) + jump block109(v771) -ebb109(v184: i32): +block109(v184: i32): v785 = iconst.i32 0 v193 -> v785 v791 = ifcmp_imm v193, -1 trapif ne v791, user0 - jump ebb117 + jump block117 -ebb117: +block117: v796 = iconst.i32 0 - jump ebb118(v796) + jump block118(v796) -ebb118(v194: i32): +block118(v194: i32): v203 = iconst.i32 -63 v809 = iconst.i32 0 v207 -> v809 v815 = ifcmp_imm v207, -1 trapif ne v815, user0 - jump ebb126 + jump block126 -ebb126: +block126: v209 = iconst.i32 0 v823 = ifcmp_imm v209, -1 trapif ne v823, user0 - jump ebb129 + jump block129 -ebb129: +block129: v213 = iconst.i32 -23 v214 = iconst.i32 -19 v215 = icmp_imm eq v214, 0 @@ -111,9 +111,9 @@ ebb129: v858, v859 = x86_sdivmodx v175, v857, v232 v233 -> v858 v915 = iconst.i32 0 - jump ebb163(v915) + jump block163(v915) -ebb163(v253: i32): +block163(v253: i32): v255 = iconst.i32 0 v256 = iconst.i32 -23 v257 = iconst.i32 -19 diff --git a/cranelift/filetests/filetests/regalloc/spill-noregs.clif b/cranelift/filetests/filetests/regalloc/spill-noregs.clif index c470b3355b..5acdd45b17 100644 --- a/cranelift/filetests/filetests/regalloc/spill-noregs.clif +++ b/cranelift/filetests/filetests/regalloc/spill-noregs.clif @@ -7,20 +7,20 @@ target x86_64 ; 'Ran out of GPR registers when inserting copy before v68 = icmp.i32 eq v66, v67', ; cranelift-codegen/src/regalloc/spilling.rs:425:28 message. 
; -; The process_reg_uses() function is trying to insert a copy before the icmp instruction in ebb4 -; and runs out of registers to spill. Note that ebb7 has a lot of dead parameter values. +; The process_reg_uses() function is trying to insert a copy before the icmp instruction in block4 +; and runs out of registers to spill. Note that block7 has a lot of dead parameter values. ; -; The spiller was not releasing register pressure for dead EBB parameters. +; The spiller was not releasing register pressure for dead block parameters. function %pr223(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v { -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = iconst.i32 0 v3 = iconst.i64 0 v4 = iconst.i32 0xffff_ffff_bb3f_4a2c - brz v4, ebb5 - jump ebb1 + brz v4, block5 + jump block1 -ebb1: +block1: v5 = iconst.i32 0 v6 = copy.i64 v3 v7 = copy.i64 v3 @@ -33,10 +33,10 @@ ebb1: v14 = copy.i64 v3 v15 = copy.i64 v3 v16 = copy.i64 v3 - brnz v5, ebb4(v2, v3, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) - jump ebb2 + brnz v5, block4(v2, v3, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) + jump block2 -ebb2: +block2: v17 = iconst.i32 0 v18 = copy.i64 v3 v19 = copy.i64 v3 @@ -49,19 +49,19 @@ ebb2: v26 = copy.i64 v3 v27 = copy.i64 v3 v28 = copy.i64 v3 - brnz v17, ebb4(v2, v3, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) - jump ebb3 + brnz v17, block4(v2, v3, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) + jump block3 -ebb3: - jump ebb1 +block3: + jump block1 -ebb4(v29: i32, v30: i64, v31: i64, v32: i64, v33: i64, v34: i64, v35: i64, v36: i64, v37: i64, v38: i64, v39: i64, v40: i64, v41: i64): - jump ebb7(v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) +block4(v29: i32, v30: i64, v31: i64, v32: i64, v33: i64, v34: i64, v35: i64, v36: i64, v37: i64, v38: i64, v39: i64, v40: i64, v41: i64): + jump block7(v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) -ebb5: - jump ebb6 +block5: + jump block6 -ebb6: +block6: v42 = copy.i64 v3 v43 = copy.i64 v3 v44 = copy.i64 v3 @@ -73,103 +73,103 @@ ebb6: v50 = copy.i64 v3 v51 = copy.i64 v3 v52 = copy.i64 v3 - jump ebb7(v2, v3, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) + jump block7(v2, v3, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) -ebb7(v53: i32, v54: i64, v55: i64, v56: i64, v57: i64, v58: i64, v59: i64, v60: i64, v61: i64, v62: i64, v63: i64, v64: i64, v65: i64): +block7(v53: i32, v54: i64, v55: i64, v56: i64, v57: i64, v58: i64, v59: i64, v60: i64, v61: i64, v62: i64, v63: i64, v64: i64, v65: i64): v66 = iconst.i32 0 v67 = iconst.i32 0 v68 = icmp eq v66, v67 v69 = bint.i32 v68 - jump ebb8 + jump block8 -ebb8: - jump ebb9 +block8: + jump block9 -ebb9: +block9: v70 = iconst.i32 0xffff_ffff_ffff_912f - brz v70, ebb10 - jump ebb35 + brz v70, block10 + jump block35 -ebb10: +block10: v71 = iconst.i32 0 - brz v71, ebb11 - jump ebb27 + brz v71, block11 + jump block27 -ebb11: - jump ebb12 +block11: + jump block12 -ebb12: - jump ebb13 +block12: + jump block13 -ebb13: - jump ebb14 +block13: + jump block14 -ebb14: - jump ebb15 +block14: + jump block15 -ebb15: - jump ebb16 +block15: + jump block16 -ebb16: - jump ebb17 +block16: + jump block17 -ebb17: - jump ebb18 +block17: + jump block18 -ebb18: - jump ebb19 +block18: + jump block19 -ebb19: - jump ebb20 +block19: + jump block20 -ebb20: - jump ebb21 +block20: + jump block21 -ebb21: - jump ebb22 +block21: + jump block22 -ebb22: - jump ebb23 +block22: + jump block23 -ebb23: - jump ebb24 +block23: + jump block24 -ebb24: - jump ebb25 +block24: + 
jump block25 -ebb25: - jump ebb26 +block25: + jump block26 -ebb26: - jump ebb27 +block26: + jump block27 -ebb27: - jump ebb28 +block27: + jump block28 -ebb28: - jump ebb29 +block28: + jump block29 -ebb29: - jump ebb30 +block29: + jump block30 -ebb30: - jump ebb31 +block30: + jump block31 -ebb31: - jump ebb32 +block31: + jump block32 -ebb32: - jump ebb33 +block32: + jump block33 -ebb33: - jump ebb34 +block33: + jump block34 -ebb34: - jump ebb35 +block34: + jump block35 -ebb35: - jump ebb36 +block35: + jump block36 -ebb36: +block36: trap user0 } diff --git a/cranelift/filetests/filetests/regalloc/spill.clif b/cranelift/filetests/filetests/regalloc/spill.clif index 525921a374..23706cd2cf 100644 --- a/cranelift/filetests/filetests/regalloc/spill.clif +++ b/cranelift/filetests/filetests/regalloc/spill.clif @@ -24,8 +24,8 @@ function %pyramid(i32) -> i32 { ; check: ss1 = spill_slot 4 ; check: ss2 = spill_slot 4 ; not: spill_slot -ebb0(v1: i32): -; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1]) +block0(v1: i32): +; check: block0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1]) ; check: ,ss0]$WS v1 = spill $rv1 ; nextln: ,ss1]$WS $(link=$V) = spill $rlink ; not: spill @@ -71,7 +71,7 @@ ebb0(v1: i32): ; All values live across a call must be spilled function %across_call(i32) { fn0 = %foo(i32) -ebb0(v1: i32): +block0(v1: i32): ; check: v1 = spill call fn0(v1) ; check: call fn0 @@ -84,7 +84,7 @@ ebb0(v1: i32): ; The same value used for two function arguments. function %doubleuse(i32) { fn0 = %xx(i32, i32) -ebb0(v0: i32): +block0(v0: i32): ; check: $(c=$V) = copy v0 call fn0(v0, v0) ; check: call fn0(v0, $c) @@ -94,7 +94,7 @@ ebb0(v0: i32): ; The same value used as indirect callee and argument. function %doubleuse_icall1(i32) { sig0 = (i32) system_v -ebb0(v0: i32): +block0(v0: i32): ; not:copy call_indirect sig0, v0(v0) return @@ -103,7 +103,7 @@ ebb0(v0: i32): ; The same value used as indirect callee and two arguments. function %doubleuse_icall2(i32) { sig0 = (i32, i32) system_v -ebb0(v0: i32): +block0(v0: i32): ; check: $(c=$V) = copy v0 call_indirect sig0, v0(v0, v0) ; check: call_indirect sig0, v0(v0, $c) @@ -115,21 +115,21 @@ function %stackargs(i32, i32, i32, i32, i32, i32, i32, i32) -> i32 { ; check: ss0 = incoming_arg 4 ; check: ss1 = incoming_arg 4, offset 4 ; not: incoming_arg -ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32): +block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32): ; unordered: fill v6 ; unordered: fill v7 v10 = iadd v6, v7 return v10 } -; More EBB arguments than registers. -function %ebbargs(i32) -> i32 { -ebb0(v1: i32): +; More block arguments than registers. +function %blockargs(i32) -> i32 { +block0(v1: i32): ; check: v1 = spill v2 = iconst.i32 1 - jump ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2) + jump block1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2) -ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32): +block1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32): v22 = iadd v10, v11 v23 = iadd v22, v12 v24 = iadd v23, v13 @@ -145,18 +145,18 @@ ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: return v33 } -; Spilling an EBB argument to make room for a branch operand. +; Spilling a block argument to make room for a branch operand. 
function %brargs(i32) -> i32 { -ebb0(v1: i32): +block0(v1: i32): ; check: v1 = spill v2 = iconst.i32 1 - brnz v1, ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2) - jump ebb2 + brnz v1, block1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2) + jump block2 -ebb2: +block2: return v1 -ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32): +block1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32): v22 = iadd v10, v11 v23 = iadd v22, v12 v24 = iadd v23, v13 @@ -181,8 +181,8 @@ function %use_spilled_value(i32) -> i32 { ; check: ss0 = spill_slot 4 ; check: ss1 = spill_slot 4 ; check: ss2 = spill_slot 4 -ebb0(v1: i32): -; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1]) +block0(v1: i32): +; check: block0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1]) ; check: ,ss0]$WS v1 = spill $rv1 ; nextln: ,ss1]$WS $(link=$V) = spill $rlink ; not: spill diff --git a/cranelift/filetests/filetests/regalloc/unreachable_code.clif b/cranelift/filetests/filetests/regalloc/unreachable_code.clif index 0eac70fc6f..4c288a91dd 100644 --- a/cranelift/filetests/filetests/regalloc/unreachable_code.clif +++ b/cranelift/filetests/filetests/regalloc/unreachable_code.clif @@ -7,41 +7,41 @@ target x86_64 haswell ; This function contains unreachable blocks which trip up the register ; allocator if they don't get cleared out. function %unreachable_blocks(i64 vmctx) -> i32 baldrdash_system_v { -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i32 0 v2 = iconst.i32 0 - jump ebb2 + jump block2 -ebb2: - jump ebb4 +block2: + jump block4 -ebb4: - jump ebb2 +block4: + jump block2 ; Everything below this point is unreachable. -ebb3(v3: i32): +block3(v3: i32): v5 = iadd.i32 v2, v3 - jump ebb6 + jump block6 -ebb6: - jump ebb6 +block6: + jump block6 -ebb7(v6: i32): +block7(v6: i32): v7 = iadd.i32 v5, v6 - jump ebb8 + jump block8 -ebb8: - jump ebb10 +block8: + jump block10 -ebb10: - jump ebb8 +block10: + jump block8 -ebb9(v8: i32): +block9(v8: i32): v10 = iadd.i32 v7, v8 - jump ebb1(v10) + jump block1(v10) -ebb1(v11: i32): +block1(v11: i32): return v11 } diff --git a/cranelift/filetests/filetests/regalloc/x86-regres.clif b/cranelift/filetests/filetests/regalloc/x86-regres.clif index 0b9bf12736..e239d0ad37 100644 --- a/cranelift/filetests/filetests/regalloc/x86-regres.clif +++ b/cranelift/filetests/filetests/regalloc/x86-regres.clif @@ -2,48 +2,48 @@ test regalloc target i686 ; regex: V=v\d+ -; regex: EBB=ebb\d+ +; regex: BB=block\d+ -; The value v9 appears both as the branch control and one of the EBB arguments -; in the brnz instruction in ebb2. It also happens that v7 and v9 are assigned +; The value v9 appears both as the branch control and one of the block arguments +; in the brnz instruction in block2. It also happens that v7 and v9 are assigned ; to the same register, so v9 doesn't need to be moved before the brnz. ; ; This ended up confusing the constraint solver which had not made a record of ; the fixed register assignment for v9 since it was already in the correct ; register.
function %pr147(i32) -> i32 system_v { -ebb0(v0: i32): +block0(v0: i32): v1 = iconst.i32 0 v2 = iconst.i32 1 v3 = iconst.i32 0 - jump ebb2(v3, v2, v0) + jump block2(v3, v2, v0) - ; check: $(splitEdge=$EBB): - ; check: jump ebb2($V, $V, v9) + ; check: $(splitEdge=$BB): + ; check: jump block2($V, $V, v9) -ebb2(v4: i32, v5: i32, v7: i32): - ; check: ebb2 +block2(v4: i32, v5: i32, v7: i32): + ; check: block2 v6 = iadd v4, v5 v8 = iconst.i32 -1 ; v7 is killed here and v9 gets the same register. v9 = iadd v7, v8 ; check: v9 = iadd v7, v8 - ; Here v9 the brnz control appears to interfere with v9 the EBB argument, + ; Here v9 the brnz control appears to interfere with v9 the block argument, ; so divert_fixed_input_conflicts() calls add_var(v9), which is ok. The ; add_var sanity checks got confused when no fixed assignment could be ; found for v9. ; ; We should be able to handle this situation without making copies of v9. - brnz v9, ebb2(v5, v6, v9) + brnz v9, block2(v5, v6, v9) ; check: brnz v9, $splitEdge - jump ebb3 + jump block3 -ebb3: +block3: return v5 } function %select_i64(i64, i64, i32) -> i64 { -ebb0(v0: i64, v1: i64, v2: i32): +block0(v0: i64, v1: i64, v2: i32): v3 = select v2, v0, v1 return v3 } diff --git a/cranelift/filetests/filetests/regress/allow-relaxation-shrink.clif b/cranelift/filetests/filetests/regress/allow-relaxation-shrink.clif index 3dd8250656..995e7c5f64 100644 --- a/cranelift/filetests/filetests/regress/allow-relaxation-shrink.clif +++ b/cranelift/filetests/filetests/regress/allow-relaxation-shrink.clif @@ -9,33 +9,33 @@ function u0:0(i64, i64) -> i64 system_v { sig0 = (i64) -> i64 system_v fn0 = u0:8 sig0 -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v3 = stack_addr.i64 ss1 v5 = call fn0(v1) v6 = iconst.i64 0 v8 = iconst.i64 0 - jump ebb3(v6, v1, v8) + jump block3(v6, v1, v8) -ebb3(v39: i64, v40: i64, v42: i64): +block3(v39: i64, v40: i64, v42: i64): v9 = load.i64 v3 v11 = icmp_imm ugt v9, 1 v12 = bint.i8 v11 v13 = uextend.i32 v12 v14 = icmp_imm eq v13, 0 - brnz v14, ebb4 - jump ebb5 + brnz v14, block4 + jump block5 -ebb4: +block4: v18 = icmp_imm.i64 eq v40, 0 v19 = bint.i8 v18 v20 = uextend.i32 v19 - brz v20, ebb6 - jump ebb7 + brz v20, block6 + jump block7 -ebb7: +block7: trap user0 -ebb5: +block5: v22 = iconst.i32 1 v23 = ishl.i64 v39, v22 v25 = iconst.i64 1 @@ -47,9 +47,9 @@ ebb5: v31 = iconst.i32 1 v32 = ushr v30, v31 store v32, v3 - jump ebb3(v27, v40, v29) + jump block3(v27, v40, v29) -ebb6: +block6: v38 = iconst.i64 0 return v38 } diff --git a/cranelift/filetests/filetests/safepoint/basic.clif b/cranelift/filetests/filetests/safepoint/basic.clif index cb52fbf66b..7e0088b23b 100644 --- a/cranelift/filetests/filetests/safepoint/basic.clif +++ b/cranelift/filetests/filetests/safepoint/basic.clif @@ -3,69 +3,69 @@ set enable_safepoints=true target x86_64 function %test(i32, r64, r64) -> r64 { - ebb0(v0: i32, v1:r64, v2:r64): - jump ebb1(v0) - ebb1(v3: i32): + block0(v0: i32, v1:r64, v2:r64): + jump block1(v0) + block1(v3: i32): v4 = irsub_imm v3, 1 - jump ebb2(v4) - ebb2(v5: i32): + jump block2(v4) + block2(v5: i32): resumable_trap interrupt - brz v5, ebb1(v5) - jump ebb3 - ebb3: + brz v5, block1(v5) + jump block3 + block3: v6 = null.r64 v7 = is_null v6 - brnz v7, ebb2(v0) - jump ebb4 - ebb4: - brnz v0, ebb5 - jump ebb6 - ebb5: + brnz v7, block2(v0) + jump block4 + block4: + brnz v0, block5 + jump block6 + block5: return v1 - ebb6: + block6: return v2 } ; sameln: function %test(i32 [%rdi], r64 [%rsi], r64 [%rdx]) -> r64 [%rax] fast { -; nextln: ebb0(v0: i32 
[%rdi], v1: r64 [%rsi], v2: r64 [%rdx]): +; nextln: block0(v0: i32 [%rdi], v1: r64 [%rsi], v2: r64 [%rdx]): ; nextln: v10 = copy v0 -; nextln: jump ebb1(v10) +; nextln: jump block1(v10) ; nextln: -; nextln: ebb7: +; nextln: block7: ; nextln: regmove.i32 v5, %rcx -> %rax -; nextln: jump ebb1(v5) +; nextln: jump block1(v5) ; nextln: -; nextln: ebb1(v3: i32 [%rax]): +; nextln: block1(v3: i32 [%rax]): ; nextln: v8 = iconst.i32 1 ; nextln: v4 = isub v8, v3 -; nextln: jump ebb2(v4) +; nextln: jump block2(v4) ; nextln: -; nextln: ebb8: +; nextln: block8: ; nextln: v9 = copy.i32 v0 ; nextln: regmove v9, %rax -> %rcx -; nextln: jump ebb2(v9) +; nextln: jump block2(v9) ; nextln: -; nextln: ebb2(v5: i32 [%rcx]): +; nextln: block2(v5: i32 [%rcx]): ; nextln: safepoint v1, v2 ; nextln: resumable_trap interrupt -; nextln: brz v5, ebb7 -; nextln: jump ebb3 +; nextln: brz v5, block7 +; nextln: jump block3 ; nextln: -; nextln: ebb3: +; nextln: block3: ; nextln: v6 = null.r64 ; nextln: v7 = is_null v6 -; nextln: brnz v7, ebb8 -; nextln: jump ebb4 +; nextln: brnz v7, block8 +; nextln: jump block4 ; nextln: -; nextln: ebb4: -; nextln: brnz.i32 v0, ebb5 -; nextln: jump ebb6 +; nextln: block4: +; nextln: brnz.i32 v0, block5 +; nextln: jump block6 ; nextln: -; nextln: ebb5: +; nextln: block5: ; nextln: regmove.r64 v1, %rsi -> %rax ; nextln: return v1 ; nextln: -; nextln: ebb6: +; nextln: block6: ; nextln: regmove.r64 v2, %rdx -> %rax ; nextln: return v2 ; nextln: } diff --git a/cranelift/filetests/filetests/safepoint/call.clif b/cranelift/filetests/filetests/safepoint/call.clif index 9e9583093b..53c9246323 100644 --- a/cranelift/filetests/filetests/safepoint/call.clif +++ b/cranelift/filetests/filetests/safepoint/call.clif @@ -7,15 +7,15 @@ function %direct() -> r64 { fn1 = %one() -> r64 fn2 = %two() -> i32, r64 -ebb0: +block0: call fn0() v1 = call fn1() v2, v3 = call fn2() - brz v2, ebb2 - jump ebb1 -ebb1: + brz v2, block2 + jump block1 +block1: return v1 -ebb2: +block2: v4 = call fn1() return v3 } @@ -30,7 +30,7 @@ ebb2: ; nextln: fn1 = %one sig1 ; nextln: fn2 = %two sig2 ; nextln: -; nextln: ebb0: +; nextln: block0: ; nextln: v5 = func_addr.i64 fn0 ; nextln: call_indirect sig0, v5() ; nextln: v6 = func_addr.i64 fn1 @@ -40,15 +40,15 @@ ebb2: ; nextln: safepoint v1 ; nextln: v2, v10 = call_indirect sig2, v7() ; nextln: v3 = spill v10 -; nextln: brz v2, ebb2 -; nextln: jump ebb1 +; nextln: brz v2, block2 +; nextln: jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: v11 = fill.r64 v1 ; nextln: regmove v11, %r15 -> %rax ; nextln: return v11 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v8 = func_addr.i64 fn1 ; nextln: safepoint v3 ; nextln: v4 = call_indirect sig1, v8() diff --git a/cranelift/filetests/filetests/simple_gvn/basic.clif b/cranelift/filetests/filetests/simple_gvn/basic.clif index 72155a324c..107c3897d1 100644 --- a/cranelift/filetests/filetests/simple_gvn/basic.clif +++ b/cranelift/filetests/filetests/simple_gvn/basic.clif @@ -1,7 +1,7 @@ test simple-gvn function %simple_redundancy(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = iadd v0, v1 v3 = iadd v0, v1 v4 = imul v2, v3 @@ -10,7 +10,7 @@ ebb0(v0: i32, v1: i32): } function %cascading_redundancy(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = iadd v0, v1 v3 = iadd v0, v1 v4 = imul v2, v3 @@ -21,22 +21,22 @@ ebb0(v0: i32, v1: i32): } function %redundancies_on_some_paths(i32, i32, i32) -> i32 { -ebb0(v0: i32, v1: i32, v2: i32): +block0(v0: i32, v1: i32, v2: i32): v3 = 
iadd v0, v1 - brz v3, ebb1 - jump ebb3 + brz v3, block1 + jump block3 -ebb3: +block3: v4 = iadd v0, v1 - jump ebb2(v4) -; check: jump ebb2(v3) + jump block2(v4) +; check: jump block2(v3) -ebb1: +block1: v5 = iadd v0, v1 - jump ebb2(v5) -; check: jump ebb2(v3) + jump block2(v5) +; check: jump block2(v3) -ebb2(v6: i32): +block2(v6: i32): v7 = iadd v0, v1 v8 = iadd v6, v7 ; check: v8 = iadd v6, v3 diff --git a/cranelift/filetests/filetests/simple_gvn/readonly.clif b/cranelift/filetests/filetests/simple_gvn/readonly.clif index 3c01299064..802396f4f8 100644 --- a/cranelift/filetests/filetests/simple_gvn/readonly.clif +++ b/cranelift/filetests/filetests/simple_gvn/readonly.clif @@ -7,7 +7,7 @@ function %eliminate_redundant_global_loads(i32, i64 vmctx) { gv1 = load.i64 notrap aligned readonly gv0 heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = heap_addr.i64 heap0, v0, 1 diff --git a/cranelift/filetests/filetests/simple_gvn/reject.clif b/cranelift/filetests/filetests/simple_gvn/reject.clif index 00d88e5e66..c4613af4dc 100644 --- a/cranelift/filetests/filetests/simple_gvn/reject.clif +++ b/cranelift/filetests/filetests/simple_gvn/reject.clif @@ -1,7 +1,7 @@ test simple-gvn function %other_side_effects(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): regmove v0, %10 -> %20 regmove v0, %10 -> %20 regmove v0, %20 -> %10 @@ -11,7 +11,7 @@ ebb0(v0: i32): } function %differing_typevars() -> i64 { -ebb0: +block0: v0 = iconst.i32 7 v1 = iconst.i64 7 v2 = iconst.i64 8 @@ -25,7 +25,7 @@ ebb0: } function %cpu_flags() -> b1 { -ebb0: +block0: v0 = iconst.i32 7 v1 = iconst.i32 8 v2 = ifcmp v0, v1 @@ -41,7 +41,7 @@ ebb0: } function %spill() -> i32 { -ebb0: +block0: v0 = iconst.i32 7 v1 = spill v0 v2 = fill v1 diff --git a/cranelift/filetests/filetests/simple_gvn/scopes.clif b/cranelift/filetests/filetests/simple_gvn/scopes.clif index bf4e7fac94..63a425ad3f 100644 --- a/cranelift/filetests/filetests/simple_gvn/scopes.clif +++ b/cranelift/filetests/filetests/simple_gvn/scopes.clif @@ -1,29 +1,29 @@ test simple-gvn function %two_diamonds(i32, i32, i32, i32, i32) { -ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32): +block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32): v5 = iconst.i32 16 ; check: v5 = iconst.i32 16 - brz v0, ebb1 - jump ebb5 + brz v0, block1 + jump block5 -ebb5: +block5: v6 = iconst.i32 17 ; check: v6 = iconst.i32 17 v7 = iconst.i32 16 ; not: v7 = iconst.i32 16 - jump ebb2 + jump block2 -ebb1: +block1: v8 = iconst.i32 18 ; check: v8 = iconst.i32 18 v9 = iconst.i32 17 ; check: v9 = iconst.i32 17 v10 = iconst.i32 16 ; not: v10 = iconst.i32 16 - jump ebb2 + jump block2 -ebb2: +block2: v11 = iconst.i32 19 ; check: v11 = iconst.i32 19 v12 = iconst.i32 18 @@ -32,10 +32,10 @@ ebb2: ; check: v13 = iconst.i32 17 v14 = iconst.i32 16 ; not: v14 = iconst.i32 16 - brz v1, ebb3 - jump ebb6 + brz v1, block3 + jump block6 -ebb6: +block6: v15 = iconst.i32 20 ; check: v15 = iconst.i32 20 v16 = iconst.i32 19 @@ -46,9 +46,9 @@ ebb6: ; not: v18 = iconst.i32 17 v19 = iconst.i32 16 ; not: v19 = iconst.i32 16 - jump ebb4 + jump block4 -ebb3: +block3: v20 = iconst.i32 21 ; check: v20 = iconst.i32 21 v21 = iconst.i32 20 @@ -61,9 +61,9 @@ ebb3: ; not: v24 = iconst.i32 17 v25 = iconst.i32 16 ; not: v25 = iconst.i32 16 - jump ebb4 + jump block4 -ebb4: +block4: v26 = iconst.i32 22 ; check: v26 = iconst.i32 22 v27 = iconst.i32 21 diff --git a/cranelift/filetests/filetests/simple_preopt/branch.clif 
b/cranelift/filetests/filetests/simple_preopt/branch.clif index f3535b1e70..21cc7afda3 100644 --- a/cranelift/filetests/filetests/simple_preopt/branch.clif +++ b/cranelift/filetests/filetests/simple_preopt/branch.clif @@ -2,80 +2,80 @@ test simple_preopt target x86_64 function %icmp_to_brz_fold(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = icmp_imm eq v0, 0 - brnz v1, ebb1 - jump ebb2 -ebb1: + brnz v1, block1 + jump block2 +block1: v3 = iconst.i32 1 return v3 -ebb2: +block2: v4 = iconst.i32 2 return v4 } ; sameln: function %icmp_to_brz_fold -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v1 = icmp_imm eq v0, 0 -; nextln: brnz v0, ebb2 -; nextln: jump ebb1 +; nextln: brnz v0, block2 +; nextln: jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: v3 = iconst.i32 1 ; nextln: return v3 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v4 = iconst.i32 2 ; nextln: return v4 ; nextln: } function %icmp_to_brz_inverted_fold(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = icmp_imm ne v0, 0 - brz v1, ebb1 - jump ebb2 -ebb1: + brz v1, block1 + jump block2 +block1: v3 = iconst.i32 1 return v3 -ebb2: +block2: v4 = iconst.i32 2 return v4 } ; sameln: function %icmp_to_brz_inve -; nextln: ebb0(v0: i32): +; nextln: block0(v0: i32): ; nextln: v1 = icmp_imm ne v0, 0 -; nextln: brnz v0, ebb2 -; nextln: jump ebb1 +; nextln: brnz v0, block2 +; nextln: jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: v3 = iconst.i32 1 ; nextln: return v3 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v4 = iconst.i32 2 ; nextln: return v4 ; nextln: } function %br_icmp_inversion(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): - br_icmp ugt v0, v1, ebb1 - jump ebb2 -ebb1: +block0(v0: i32, v1: i32): + br_icmp ugt v0, v1, block1 + jump block2 +block1: v2 = iconst.i32 1 return v2 -ebb2: +block2: v3 = iconst.i32 2 return v3 } ; sameln: function %br_icmp_inversio -; nextln: ebb0(v0: i32, v1: i32): -; nextln: br_icmp ule v0, v1, ebb2 -; nextln: jump ebb1 +; nextln: block0(v0: i32, v1: i32): +; nextln: br_icmp ule v0, v1, block2 +; nextln: jump block1 ; nextln: -; nextln: ebb1: +; nextln: block1: ; nextln: v2 = iconst.i32 1 ; nextln: return v2 ; nextln: -; nextln: ebb2: +; nextln: block2: ; nextln: v3 = iconst.i32 2 ; nextln: return v3 ; nextln: } diff --git a/cranelift/filetests/filetests/simple_preopt/div_by_const_indirect.clif b/cranelift/filetests/filetests/simple_preopt/div_by_const_indirect.clif index c111113197..101e4eb201 100644 --- a/cranelift/filetests/filetests/simple_preopt/div_by_const_indirect.clif +++ b/cranelift/filetests/filetests/simple_preopt/div_by_const_indirect.clif @@ -4,7 +4,7 @@ target x86_64 baseline ; Cases where the denominator is created by an iconst function %indir_udiv32(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = iconst.i32 7 v2 = udiv v0, v1 ; check: iconst.i32 7 @@ -19,7 +19,7 @@ ebb0(v0: i32): } function %indir_sdiv32(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = iconst.i32 -17 v2 = sdiv v0, v1 ; check: iconst.i32 -17 @@ -33,7 +33,7 @@ ebb0(v0: i32): } function %indir_udiv64(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i64 1337 v2 = udiv v0, v1 ; check: iconst.i64 1337 @@ -45,7 +45,7 @@ ebb0(v0: i64): } function %indir_sdiv64(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = iconst.i64 -90210 v2 = sdiv v0, v1 ; check: iconst.i64 0xffff_ffff_fffe_9f9e diff --git a/cranelift/filetests/filetests/simple_preopt/div_by_const_non_power_of_2.clif 
b/cranelift/filetests/filetests/simple_preopt/div_by_const_non_power_of_2.clif index 2a16699aae..b1225a28d5 100644 --- a/cranelift/filetests/filetests/simple_preopt/div_by_const_non_power_of_2.clif +++ b/cranelift/filetests/filetests/simple_preopt/div_by_const_non_power_of_2.clif @@ -5,7 +5,7 @@ target i686 baseline ; complex case (mul, sub, shift, add, shift) function %t_udiv32_p7(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 7 ; check: iconst.i32 0x2492_4925 ; check: umulhi v0, v2 @@ -19,7 +19,7 @@ ebb0(v0: i32): ; simple case (mul, shift) function %t_udiv32_p125(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 125 ; check: iconst.i32 0x1062_4dd3 ; check: umulhi v0, v2 @@ -30,7 +30,7 @@ ebb0(v0: i32): ; simple case w/ shift by zero (mul) function %t_udiv32_p641(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 641 ; check: iconst.i32 0x0066_3d81 ; check: v3 = umulhi v0, v2 @@ -43,7 +43,7 @@ ebb0(v0: i32): ; simple case w/ shift by zero (mul, add-sign-bit) function %t_sdiv32_n6(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, -6 ; check: iconst.i32 0xffff_ffff_d555_5555 ; check: smulhi v0, v2 @@ -55,7 +55,7 @@ ebb0(v0: i32): ; simple case (mul, shift, add-sign-bit) function %t_sdiv32_n5(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, -5 ; check: iconst.i32 0xffff_ffff_9999_9999 ; check: smulhi v0, v2 @@ -68,7 +68,7 @@ ebb0(v0: i32): ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit) function %t_sdiv32_n3(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, -3 ; check: iconst.i32 0x5555_5555 ; check: smulhi v0, v2 @@ -82,7 +82,7 @@ ebb0(v0: i32): ; simple case w/ shift by zero (mul, add-sign-bit) function %t_sdiv32_p6(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, 6 ; check: iconst.i32 0x2aaa_aaab ; check: smulhi v0, v2 @@ -94,7 +94,7 @@ ebb0(v0: i32): ; case d > 0 && M < 0 (mul, add, shift, add-sign-bit) function %t_sdiv32_p7(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, 7 ; check: iconst.i32 0xffff_ffff_9249_2493 ; check: smulhi v0, v2 @@ -108,7 +108,7 @@ ebb0(v0: i32): ; simple case (mul, shift, add-sign-bit) function %t_sdiv32_p625(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, 625 ; check: iconst.i32 0x68db_8bad ; check: smulhi v0, v2 @@ -124,7 +124,7 @@ ebb0(v0: i32): ; complex case (mul, sub, shift, add, shift) function %t_udiv64_p7(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 7 ; check: iconst.i64 0x2492_4924_9249_2493 ; check: umulhi v0, v2 @@ -138,7 +138,7 @@ ebb0(v0: i64): ; simple case (mul, shift) function %t_udiv64_p9(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 9 ; check: iconst.i64 0xe38e_38e3_8e38_e38f ; check: umulhi v0, v2 @@ -149,7 +149,7 @@ ebb0(v0: i64): ; complex case (mul, sub, shift, add, shift) function %t_udiv64_p125(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 125 ; check: iconst.i64 0x0624_dd2f_1a9f_be77 ; check: umulhi v0, v2 @@ -163,7 +163,7 @@ ebb0(v0: i64): ; simple case w/ shift by zero (mul) function %t_udiv64_p274177(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 274177 ; check: iconst.i64 0x3d30_f19c_d101 ; check: v3 = umulhi v0, v2 @@ -176,7 +176,7 @@ ebb0(v0: i64): ; simple case (mul, shift, add-sign-bit) function %t_sdiv64_n625(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, -625 ; check: iconst.i64 0xcb92_3a29_c779_a6b5 ; check: smulhi v0, v2 @@ -189,7 +189,7 @@ ebb0(v0: i64): ; simple case
w/ zero shift (mul, add-sign-bit) function %t_sdiv64_n6(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, -6 ; check: iconst.i64 0xd555_5555_5555_5555 ; check: smulhi v0, v2 @@ -201,7 +201,7 @@ ebb0(v0: i64): ; simple case w/ zero shift (mul, add-sign-bit) function %t_sdiv64_n5(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, -5 ; check: iconst.i64 0x9999_9999_9999_9999 ; check: smulhi v0, v2 @@ -214,7 +214,7 @@ ebb0(v0: i64): ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit) function %t_sdiv64_n3(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, -3 ; check: iconst.i64 0x5555_5555_5555_5555 ; check: smulhi v0, v2 @@ -228,7 +228,7 @@ ebb0(v0: i64): ; simple case w/ zero shift (mul, add-sign-bit) function %t_sdiv64_p6(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, 6 ; check: iconst.i64 0x2aaa_aaaa_aaaa_aaab ; check: smulhi v0, v2 @@ -240,7 +240,7 @@ ebb0(v0: i64): ; case d > 0 && M < 0 (mul, add, shift, add-sign-bit) function %t_sdiv64_p15(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, 15 ; check: iconst.i64 0x8888_8888_8888_8889 ; check: smulhi v0, v2 @@ -254,7 +254,7 @@ ebb0(v0: i64): ; simple case (mul, shift, add-sign-bit) function %t_sdiv64_p625(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = sdiv_imm v0, 625 ; check: iconst.i64 0x346d_c5d6_3886_594b ; check: smulhi v0, v2 diff --git a/cranelift/filetests/filetests/simple_preopt/div_by_const_power_of_2.clif b/cranelift/filetests/filetests/simple_preopt/div_by_const_power_of_2.clif index fb9c1744f5..83e9f95c8a 100644 --- a/cranelift/filetests/filetests/simple_preopt/div_by_const_power_of_2.clif +++ b/cranelift/filetests/filetests/simple_preopt/div_by_const_power_of_2.clif @@ -5,7 +5,7 @@ target i686 baseline ; ignored function %t_udiv32_p0(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 0 ; check: udiv_imm v0, 0 return v1 @@ -13,7 +13,7 @@ ebb0(v0: i32): ; converted to a nop function %t_udiv32_p1(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 1 ; check: nop return v1 @@ -21,7 +21,7 @@ ebb0(v0: i32): ; shift function %t_udiv32_p2(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 2 ; check: ushr_imm v0, 1 return v1 @@ -29,7 +29,7 @@ ebb0(v0: i32): ; shift function %t_udiv32_p2p31(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = udiv_imm v0, 0x8000_0000 ; check: ushr_imm v0, 31 return v1 @@ -40,7 +40,7 @@ ebb0(v0: i32): ; ignored function %t_udiv64_p0(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 0 ; check: udiv_imm v0, 0 return v1 @@ -48,7 +48,7 @@ ebb0(v0: i64): ; converted to a nop function %t_udiv64_p1(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 1 ; check: nop return v1 @@ -56,7 +56,7 @@ ebb0(v0: i64): ; shift function %t_udiv64_p2(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 2 ; check: ushr_imm v0, 1 return v1 @@ -64,7 +64,7 @@ ebb0(v0: i64): ; shift function %t_udiv64_p2p63(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = udiv_imm v0, 0x8000_0000_0000_0000 ; check: ushr_imm v0, 63 return v1 @@ -75,7 +75,7 @@ ebb0(v0: i64): ; ignored function %t_sdiv32_p0(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, 0 ; check: sdiv_imm v0, 0 return v1 @@ -83,7 +83,7 @@ ebb0(v0: i32): ; converted to a nop function %t_sdiv32_p1(i32) -> i32 { -ebb0(v0: i32): +block0(v0: i32): v1 = sdiv_imm v0, 1 ; check: nop return v1 @@ -91,7 +91,7 @@ ebb0(v0: i32): ; ignored function %t_sdiv32_n1(i32) -> i32 { -ebb0(v0: i32): 
+block0(v0: i32):
v1 = sdiv_imm v0, -1
; check: sdiv_imm v0, -1
return v1
@@ -99,7 +99,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, 2
; check: ushr_imm v0, 31
; check: iadd v0, v2
@@ -110,7 +110,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_n2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, -2
; check: ushr_imm v0, 31
; check: iadd v0, v2
@@ -121,7 +121,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_p4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, 4
; check: v2 = sshr_imm v0, 1
; check: ushr_imm v2, 30
@@ -134,7 +134,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_n4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, -4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 30
@@ -146,7 +146,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_p2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, 0x4000_0000
; check: sshr_imm v0, 29
; check: ushr_imm v2, 2
@@ -158,7 +158,7 @@ ebb0(v0: i32):
; shift
function %t_sdiv32_n2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, -0x4000_0000
; check: sshr_imm v0, 29
; check: ushr_imm v2, 2
@@ -171,7 +171,7 @@ ebb0(v0: i32):
; there's no positive version of this, since -(-0x8000_0000) isn't
; representable.
function %t_sdiv32_n2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sdiv_imm v0, -0x8000_0000
; check: sshr_imm v0, 30
; check: ushr_imm v2, 1
@@ -186,7 +186,7 @@ ebb0(v0: i32):
; ignored
function %t_sdiv64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, 0
; check: sdiv_imm v0, 0
return v1
@@ -194,7 +194,7 @@ ebb0(v0: i64):
; converted to a nop
function %t_sdiv64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, 1
; check: nop
return v1
@@ -202,7 +202,7 @@ ebb0(v0: i64):
; ignored
function %t_sdiv64_n1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, -1
; check: sdiv_imm v0, -1
return v1
@@ -210,7 +210,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, 2
; check: ushr_imm v0, 63
; check: iadd v0, v2
@@ -221,7 +221,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_n2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, -2
; check: ushr_imm v0, 63
; check: iadd v0, v2
@@ -232,7 +232,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_p4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, 4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 62
@@ -244,7 +244,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_n4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, -4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 62
@@ -256,7 +256,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_p2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, 0x4000_0000_0000_0000
; check: sshr_imm v0, 61
; check: ushr_imm v2, 2
@@ -268,7 +268,7 @@ ebb0(v0: i64):
; shift
function %t_sdiv64_n2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, -0x4000_0000_0000_0000
; check: sshr_imm v0, 61
; check: ushr_imm v2, 2
@@ -281,7 +281,7 @@ ebb0(v0: i64):
; there's no positive version of this, since -(-0x8000_0000_0000_0000) isn't
; representable.
function %t_sdiv64_n2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = sdiv_imm v0, -0x8000_0000_0000_0000
; check: sshr_imm v0, 62
; check: ushr_imm v2, 1
diff --git a/cranelift/filetests/filetests/simple_preopt/fold-extended-move-wraparound.clif b/cranelift/filetests/filetests/simple_preopt/fold-extended-move-wraparound.clif
index 074507a786..44342481b8 100644
--- a/cranelift/filetests/filetests/simple_preopt/fold-extended-move-wraparound.clif
+++ b/cranelift/filetests/filetests/simple_preopt/fold-extended-move-wraparound.clif
@@ -5,7 +5,7 @@ function %wraparound(i64 vmctx) -> f32 system_v {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 48
-ebb35(v0: i64):
+block35(v0: i64):
v88 = iconst.i64 0
v89 = iconst.i64 0x8000_0000_0000_0000
v90 = ishl_imm v88, 0x8000_0000_0000_0000
diff --git a/cranelift/filetests/filetests/simple_preopt/rem_by_const_non_power_of_2.clif b/cranelift/filetests/filetests/simple_preopt/rem_by_const_non_power_of_2.clif
index 40c5e1d828..00d0d9f16e 100644
--- a/cranelift/filetests/filetests/simple_preopt/rem_by_const_non_power_of_2.clif
+++ b/cranelift/filetests/filetests/simple_preopt/rem_by_const_non_power_of_2.clif
@@ -5,7 +5,7 @@ target i686 baseline
; complex case (mul, sub, shift, add, shift)
function %t_urem32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 7
; check: iconst.i32 0x2492_4925
; check: umulhi v0, v2
@@ -20,7 +20,7 @@ ebb0(v0: i32):
; simple case (mul, shift)
function %t_urem32_p125(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 125
; check: iconst.i32 0x1062_4dd3
; check: umulhi v0, v2
@@ -32,7 +32,7 @@ ebb0(v0: i32):
; simple case w/ shift by zero (mul)
function %t_urem32_p641(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 641
; check: iconst.i32 0x0066_3d81
; check: umulhi v0, v2
@@ -46,7 +46,7 @@ ebb0(v0: i32):
; simple case w/ shift by zero (mul, add-sign-bit)
function %t_srem32_n6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -6
; check: iconst.i32 0xffff_ffff_d555_5555
; check: smulhi v0, v2
@@ -59,7 +59,7 @@ ebb0(v0: i32):
; simple case (mul, shift, add-sign-bit)
function %t_srem32_n5(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -5
; check: iconst.i32 0xffff_ffff_9999_9999
; check: smulhi v0, v2
@@ -73,7 +73,7 @@ ebb0(v0: i32):
; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
function %t_srem32_n3(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -3
; check: iconst.i32 0x5555_5555
; check: smulhi v0, v2
@@ -88,7 +88,7 @@ ebb0(v0: i32):
; simple case w/ shift by zero (mul, add-sign-bit)
function %t_srem32_p6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 6
; check: iconst.i32 0x2aaa_aaab
; check: smulhi v0, v2
@@ -101,7 +101,7 @@ ebb0(v0: i32):
; case d > 0 && M < 0 (mull, add, shift, add-sign-bit)
function %t_srem32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 7
; check: iconst.i32 0xffff_ffff_9249_2493
; check: smulhi v0, v2
@@ -116,7 +116,7 @@ ebb0(v0: i32):
; simple case (mul, shift, add-sign-bit)
function %t_srem32_p625(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 625
; check: iconst.i32 0x68db_8bad
; check: smulhi v0, v2
@@ -133,7 +133,7 @@ ebb0(v0: i32):
; complex case (mul, sub, shift, add, shift)
function %t_urem64_p7(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 7
; check: umulhi v0, v2
; check: isub v0, v3
@@ -147,7 +147,7 @@ ebb0(v0: i64):
; simple case (mul, shift)
function %t_urem64_p9(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 9
; check: iconst.i64 0xe38e_38e3_8e38_e38f
; check: umulhi v0, v2
@@ -159,7 +159,7 @@ ebb0(v0: i64):
; complex case (mul, sub, shift, add, shift)
function %t_urem64_p125(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 125
; check: iconst.i64 0x0624_dd2f_1a9f_be77
; check: umulhi v0, v2
@@ -174,7 +174,7 @@ ebb0(v0: i64):
; simple case w/ shift by zero (mul)
function %t_urem64_p274177(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 274177
; check: iconst.i64 0x3d30_f19c_d101
; check: umulhi v0, v2
@@ -188,7 +188,7 @@ ebb0(v0: i64):
; simple case (mul, shift, add-sign-bit)
function %t_srem64_n625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -625
; check: iconst.i64 0xcb92_3a29_c779_a6b5
; check: smulhi v0, v2
@@ -202,7 +202,7 @@ ebb0(v0: i64):
; simple case w/ zero shift (mul, add-sign-bit)
function %t_srem64_n6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -6
; check: iconst.i64 0xd555_5555_5555_5555
; check: smulhi v0, v2
@@ -215,7 +215,7 @@ ebb0(v0: i64):
; simple case w/ zero shift (mul, add-sign-bit)
function %t_srem64_n5(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -5
; check: iconst.i64 0x9999_9999_9999_9999
; check: smulhi v0, v2
@@ -229,7 +229,7 @@ ebb0(v0: i64):
; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
function %t_srem64_n3(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -3
; check: iconst.i64 0x5555_5555_5555_5555
; check: smulhi v0, v2
@@ -244,7 +244,7 @@ ebb0(v0: i64):
; simple case w/ zero shift (mul, add-sign-bit)
function %t_srem64_p6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 6
; check: iconst.i64 0x2aaa_aaaa_aaaa_aaab
; check: smulhi v0, v2
@@ -257,7 +257,7 @@ ebb0(v0: i64):
; case d > 0 && M < 0 (mul, add, shift, add-sign-bit)
function %t_srem64_p15(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 15
; check: iconst.i64 0x8888_8888_8888_8889
; check: smulhi v0, v2
@@ -272,7 +272,7 @@ ebb0(v0: i64):
; simple case (mul, shift, add-sign-bit)
function %t_srem64_p625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 625
; check: iconst.i64 0x346d_c5d6_3886_594b
; check: smulhi v0, v2
diff --git a/cranelift/filetests/filetests/simple_preopt/rem_by_const_power_of_2.clif b/cranelift/filetests/filetests/simple_preopt/rem_by_const_power_of_2.clif
index 09eebfa684..1fe085e37c 100644
--- a/cranelift/filetests/filetests/simple_preopt/rem_by_const_power_of_2.clif
+++ b/cranelift/filetests/filetests/simple_preopt/rem_by_const_power_of_2.clif
@@ -5,7 +5,7 @@ target i686 baseline
; ignored
function %t_urem32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 0
; check: urem_imm v0, 0
return v1
@@ -13,7 +13,7 @@ ebb0(v0: i32):
; converted to constant zero
function %t_urem32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 1
; check: iconst.i32 0
return v1
@@ -21,7 +21,7 @@ ebb0(v0: i32):
; shift
function %t_urem32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 2
; check: band_imm v0, 1
return v1
@@ -29,7 +29,7 @@ ebb0(v0: i32):
; shift
function %t_urem32_p2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = urem_imm v0, 0x8000_0000
; check: band_imm v0, 0x7fff_ffff
return v1
@@ -40,7 +40,7 @@ ebb0(v0: i32):
; ignored
function %t_urem64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 0
; check: urem_imm v0, 0
return v1
@@ -48,7 +48,7 @@ ebb0(v0: i64):
; converted to constant zero
function %t_urem64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 1
; check: iconst.i64 0
return v1
@@ -56,7 +56,7 @@ ebb0(v0: i64):
; shift
function %t_urem64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 2
; check: band_imm v0, 1
return v1
@@ -64,7 +64,7 @@ ebb0(v0: i64):
; shift
function %t_urem64_p2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = urem_imm v0, 0x8000_0000_0000_0000
; check: band_imm v0, 0x7fff_ffff_ffff_ffff
return v1
@@ -75,7 +75,7 @@ ebb0(v0: i64):
; ignored
function %t_srem32_n1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -1
; check: srem_imm v0, -1
return v1
@@ -83,7 +83,7 @@ ebb0(v0: i32):
; ignored
function %t_srem32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 0
; check: srem_imm v0, 0
return v1
@@ -91,7 +91,7 @@ ebb0(v0: i32):
; converted to constant zero
function %t_srem32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 1
; check: iconst.i32 0
return v1
@@ -99,7 +99,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 2
; check: ushr_imm v0, 31
; check: iadd v0, v2
@@ -110,7 +110,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_n2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -2
; check: ushr_imm v0, 31
; check: iadd v0, v2
@@ -121,7 +121,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_p4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 30
@@ -133,7 +133,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_n4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 30
@@ -145,7 +145,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_p2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, 0x4000_0000
; check: sshr_imm v0, 29
; check: ushr_imm v2, 2
@@ -157,7 +157,7 @@ ebb0(v0: i32):
; shift
function %t_srem32_n2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -0x4000_0000
; check: sshr_imm v0, 29
; check: ushr_imm v2, 2
@@ -170,7 +170,7 @@ ebb0(v0: i32):
; there's no positive version of this, since -(-0x8000_0000) isn't
; representable.
function %t_srem32_n2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = srem_imm v0, -0x8000_0000
; check: sshr_imm v0, 30
; check: ushr_imm v2, 1
@@ -185,7 +185,7 @@ ebb0(v0: i32):
; ignored
function %t_srem64_n1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -1
; check: srem_imm v0, -1
return v1
@@ -193,7 +193,7 @@ ebb0(v0: i64):
; ignored
function %t_srem64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 0
; check: srem_imm v0, 0
return v1
@@ -201,7 +201,7 @@ ebb0(v0: i64):
; converted to constant zero
function %t_srem64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 1
; check: iconst.i64 0
return v1
@@ -209,7 +209,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 2
; check: ushr_imm v0, 63
; check: iadd v0, v2
@@ -220,7 +220,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_n2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -2
; check: ushr_imm v0, 63
; check: iadd v0, v2
@@ -231,7 +231,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_p4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 62
@@ -243,7 +243,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_n4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -4
; check: sshr_imm v0, 1
; check: ushr_imm v2, 62
@@ -255,7 +255,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_p2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, 0x4000_0000_0000_0000
; check: sshr_imm v0, 61
; check: ushr_imm v2, 2
@@ -267,7 +267,7 @@ ebb0(v0: i64):
; shift
function %t_srem64_n2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -0x4000_0000_0000_0000
; check: sshr_imm v0, 61
; check: ushr_imm v2, 2
@@ -280,7 +280,7 @@ ebb0(v0: i64):
; there's no positive version of this, since -(-0x8000_0000_0000_0000) isn't
; representable.
function %t_srem64_n2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = srem_imm v0, -0x8000_0000_0000_0000
; check: sshr_imm v0, 62
; check: ushr_imm v2, 1
diff --git a/cranelift/filetests/filetests/simple_preopt/simplify32.clif b/cranelift/filetests/filetests/simple_preopt/simplify32.clif
index 45add1b7a3..2582fd69aa 100644
--- a/cranelift/filetests/filetests/simple_preopt/simplify32.clif
+++ b/cranelift/filetests/filetests/simple_preopt/simplify32.clif
@@ -4,40 +4,40 @@ target i686
;; 32-bits platforms.
function %iadd_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = iadd v0, v1
return v2
}
; sameln: function %iadd_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, 2
; nextln: return v2
; nextln: }
function %isub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = isub v0, v1
return v2
}
; sameln: function %isub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, -2
; nextln: return v2
; nextln: }
function %icmp_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = icmp slt v0, v1
v3 = bint.i32 v2
return v3
}
; sameln: function %icmp_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = icmp_imm slt v0, 2
; nextln: v3 = bint.i32 v2
@@ -47,13 +47,13 @@ ebb0(v0: i32):
;; Don't simplify operations that would get illegal because of lack of native
;; support.
function %iadd_imm(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = iconst.i64 2
v2 = iadd v0, v1
return v2
}
; sameln: function %iadd_imm
-; nextln: ebb0(v0: i64):
+; nextln: block0(v0: i64):
; nextln: v1 = iconst.i64 2
; nextln: v2 = iadd v0, v1
; nextln: return v2
diff --git a/cranelift/filetests/filetests/simple_preopt/simplify64.clif b/cranelift/filetests/filetests/simple_preopt/simplify64.clif
index db485ce773..4ceabdc335 100644
--- a/cranelift/filetests/filetests/simple_preopt/simplify64.clif
+++ b/cranelift/filetests/filetests/simple_preopt/simplify64.clif
@@ -4,40 +4,40 @@ target x86_64
;; 64-bits platforms.
function %iadd_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = iadd v0, v1
return v2
}
; sameln: function %iadd_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, 2
; nextln: return v2
; nextln: }
function %isub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = isub v0, v1
return v2
}
; sameln: function %isub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, -2
; nextln: return v2
; nextln: }
function %icmp_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = icmp slt v0, v1
v3 = bint.i32 v2
return v3
}
; sameln: function %icmp_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = icmp_imm slt v0, 2
; nextln: v3 = bint.i32 v2
@@ -45,18 +45,18 @@ ebb0(v0: i32):
; nextln: }
function %brz_bint(i32) {
-ebb0(v0: i32):
+block0(v0: i32):
v3 = icmp_imm slt v0, 0
v1 = bint.i32 v3
v2 = select v1, v1, v1
trapz v1, user0
- brz v1, ebb1
- jump ebb2
+ brz v1, block1
+ jump block2
-ebb1:
+block1:
return
-ebb2:
+block2:
return
}
; sameln: function %brz_bint
@@ -65,17 +65,17 @@ ebb2:
; nextln: v1 = bint.i32 v3
; nextln: v2 = select v3, v1, v1
; nextln: trapz v3, user0
-; nextln: brnz v3, ebb2
-; nextln: jump ebb1
+; nextln: brnz v3, block2
+; nextln: jump block1
function %irsub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 2
v2 = isub v1, v0
return v2
}
; sameln: function %irsub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = irsub_imm v0, 2
; nextln: return v2
@@ -85,14 +85,14 @@ ebb0(v0: i32):
;; 8 -> 16
function %uextend_8_16() -> i16 {
-ebb0:
+block0:
v0 = iconst.i16 37
v1 = ishl_imm v0, 8
v2 = ushr_imm v1, 8
return v2
}
; sameln: function %uextend_8_16
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i16 37
; nextln: v1 = ishl_imm v0, 8
; nextln: v3 = ireduce.i8 v0
@@ -101,14 +101,14 @@ ebb0:
; nextln: }
function %sextend_8_16() -> i16 {
-ebb0:
+block0:
v0 = iconst.i16 37
v1 = ishl_imm v0, 8
v2 = sshr_imm v1, 8
return v2
}
; sameln: function %sextend_8_16
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i16 37
; nextln: v1 = ishl_imm v0, 8
; nextln: v3 = ireduce.i8 v0
@@ -118,14 +118,14 @@ ebb0:
;; 8 -> 32
function %uextend_8_32() -> i32 {
-ebb0:
+block0:
v0 = iconst.i32 37
v1 = ishl_imm v0, 24
v2 = ushr_imm v1, 24
return v2
}
; sameln: function %uextend_8_32
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i32 37
; nextln: v1 = ishl_imm v0, 24
; nextln: v3 = ireduce.i8 v0
@@ -134,14 +134,14 @@ ebb0:
; nextln: }
function %sextend_8_32() -> i32 {
-ebb0:
+block0:
v0 = iconst.i32 37
v1 = ishl_imm v0, 24
v2 = sshr_imm v1, 24
return v2
}
; sameln: function %sextend_8_32
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i32 37
; nextln: v1 = ishl_imm v0, 24
; nextln: v3 = ireduce.i8 v0
@@ -151,14 +151,14 @@ ebb0:
;; 16 -> 32
function %uextend_16_32() -> i32 {
-ebb0:
+block0:
v0 = iconst.i32 37
v1 = ishl_imm v0, 16
v2 = ushr_imm v1, 16
return v2
}
; sameln: function %uextend_16_32
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i32 37
; nextln: v1 = ishl_imm v0, 16
; nextln: v3 = ireduce.i16 v0
@@ -167,14 +167,14 @@ ebb0:
; nextln: }
function %sextend_16_32() -> i32 {
-ebb0:
+block0:
v0 = iconst.i32 37
v1 = ishl_imm v0, 16
v2 = sshr_imm v1, 16
return v2
}
; sameln: function %sextend_16_32
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i32 37
; nextln: v1 = ishl_imm v0, 16
; nextln: v3 = ireduce.i16 v0
@@ -184,14 +184,14 @@ ebb0:
;; 8 -> 64
function %uextend_8_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 56
v2 = ushr_imm v1, 56
return v2
}
; sameln: function %uextend_8_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 56
; nextln: v3 = ireduce.i8 v0
@@ -200,14 +200,14 @@ ebb0:
; nextln: }
function %sextend_8_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 56
v2 = sshr_imm v1, 56
return v2
}
; sameln: function %sextend_8_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 56
; nextln: v3 = ireduce.i8 v0
@@ -217,14 +217,14 @@ ebb0:
;; 16 -> 64
function %uextend_16_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 48
v2 = ushr_imm v1, 48
return v2
}
; sameln: function %uextend_16_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 48
; nextln: v3 = ireduce.i16 v0
@@ -233,14 +233,14 @@ ebb0:
; nextln: }
function %sextend_16_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 48
v2 = sshr_imm v1, 48
return v2
}
; sameln: function %sextend_16_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 48
; nextln: v3 = ireduce.i16 v0
@@ -250,14 +250,14 @@ ebb0:
;; 32 -> 64
function %uextend_32_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 32
v2 = ushr_imm v1, 32
return v2
}
; sameln: function %uextend_32_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 32
; nextln: v3 = ireduce.i32 v0
@@ -266,14 +266,14 @@ ebb0:
; nextln: }
function %sextend_32_64() -> i64 {
-ebb0:
+block0:
v0 = iconst.i64 37
v1 = ishl_imm v0, 32
v2 = sshr_imm v1, 32
return v2
}
; sameln: function %sextend_32_64
-; nextln: ebb0:
+; nextln: block0:
; nextln: v0 = iconst.i64 37
; nextln: v1 = ishl_imm v0, 32
; nextln: v3 = ireduce.i32 v0
@@ -282,13 +282,13 @@ ebb0:
; nextln: }
function %add_imm_fold(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iadd_imm v0, 42
v2 = iadd_imm v1, -42
return v2
}
; sameln: function %add_imm_fold(i32)
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
; nextln: v2 -> v0
; nextln: v1 = iadd_imm v0, 42
; nextln: nop
diff --git a/cranelift/filetests/filetests/verifier/bad_layout.clif b/cranelift/filetests/filetests/verifier/bad_layout.clif
index 034dd7843f..0cc2d2ed6f 100644
--- a/cranelift/filetests/filetests/verifier/bad_layout.clif
+++ b/cranelift/filetests/filetests/verifier/bad_layout.clif
@@ -1,21 +1,21 @@
test verifier
function %test_1(i32) {
- ebb0(v0: i32):
+ block0(v0: i32):
return ; error: terminator
return
}
function %test_2(i32) {
- ebb0(v0: i32):
- jump ebb2 ; error: a terminator instruction was encountered before the end of ebb0
- brz v0, ebb3
- ebb2:
- jump ebb3
- ebb3:
+ block0(v0: i32):
+ jump block2 ; error: a terminator instruction was encountered before the end of block0
+ brz v0, block3
+ block2:
+ jump block3
+ block3:
return
}
function %test_3(i32) { ; Ok
- ebb0(v0: i32):
+ block0(v0: i32):
return
}
diff --git a/cranelift/filetests/filetests/verifier/bitcast.clif b/cranelift/filetests/filetests/verifier/bitcast.clif
index eb5303cfc0..98ac9c6b35 100644
--- a/cranelift/filetests/filetests/verifier/bitcast.clif
+++ b/cranelift/filetests/filetests/verifier/bitcast.clif
@@ -2,21 +2,21 @@ test verifier
; bitcast between two types of equal size if ok
function %valid_bitcast1(i32) -> f32 { ; Ok
-ebb0(v0: i32):
+block0(v0: i32):
v1 = bitcast.f32 v0
return v1
}
; bitcast to a type larger than the operand is ok
function %valid_bitcast2(i32) -> i64 { ; Ok
-ebb0(v0: i32):
+block0(v0: i32):
v1 = bitcast.i64 v0
return v1
}
; bitcast to a smaller type is not ok
function %bad_bitcast(i64) -> i32 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = bitcast.i32 v0 ; error: The bitcast argument v0 doesn't fit in a type of 32 bits
return v1
}
diff --git a/cranelift/filetests/filetests/verifier/defs_dominates_uses.clif b/cranelift/filetests/filetests/verifier/defs_dominates_uses.clif
index fcbeb14816..c7b3b752a8 100644
--- a/cranelift/filetests/filetests/verifier/defs_dominates_uses.clif
+++ b/cranelift/filetests/filetests/verifier/defs_dominates_uses.clif
@@ -3,14 +3,14 @@ test verifier
; Test verification that uses properly dominate defs.
function %non_dominating(i32) -> i32 system_v {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iadd.i32 v2, v0 ; error: uses value v2 from non-dominating
v2 = iadd.i32 v1, v0
return v2
}
function %inst_uses_its_own_values(i32) -> i32 system_v {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iadd.i32 v1, v0 ; error: uses value v1 from itself
return v1
}
diff --git a/cranelift/filetests/filetests/verifier/flags.clif b/cranelift/filetests/filetests/verifier/flags.clif
index 8273160510..dc370c58cb 100644
--- a/cranelift/filetests/filetests/verifier/flags.clif
+++ b/cranelift/filetests/filetests/verifier/flags.clif
@@ -3,7 +3,7 @@ target i686
; Simple, correct use of CPU flags.
function %simple(i32) -> i32 {
- ebb0(v0: i32):
+ block0(v0: i32):
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
[Op2seti_abcd#490] v2 = trueif ugt v1
[Op2urm_noflags_abcd#4b6] v3 = bint.i32 v2
@@ -12,7 +12,7 @@ function %simple(i32) -> i32 {
; Overlapping flag values of different types.
function %overlap(i32, f32) -> i32 {
- ebb0(v0: i32, v1: f32):
+ block0(v0: i32, v1: f32):
[DynRexOp1rcmp#39] v2 = ifcmp v0, v0
[Op2fcmp#42e] v3 = ffcmp v1, v1
[Op2setf_abcd#490] v4 = trueff gt v3 ; error: conflicting live CPU flags: v2 and v3
@@ -24,7 +24,7 @@ function %overlap(i32, f32) -> i32 {
; CPU flags clobbered by arithmetic.
function %clobbered(i32) -> i32 {
- ebb0(v0: i32):
+ block0(v0: i32):
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
[DynRexOp1rr#01] v2 = iadd v0, v0 ; error: encoding clobbers live CPU flags in v1
[Op2seti_abcd#490] v3 = trueif ugt v1
@@ -34,7 +34,7 @@ function %clobbered(i32) -> i32 {
; CPU flags not clobbered by load.
function %live_across_load(i32) -> i32 {
- ebb0(v0: i32):
+ block0(v0: i32):
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
[Op1ld#8b] v2 = load.i32 v0
[Op2seti_abcd#490] v3 = trueif ugt v1
@@ -42,35 +42,35 @@ function %live_across_load(i32) -> i32 {
[Op1ret#c3] return v4
}
-; Correct use of CPU flags across EBB.
-function %live_across_ebb(i32) -> i32 {
- ebb0(v0: i32):
+; Correct use of CPU flags across block.
+function %live_across_block(i32) -> i32 {
+ block0(v0: i32):
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
- [Op1jmpb#eb] jump ebb1
- ebb1:
+ [Op1jmpb#eb] jump block1
+ block1:
[Op2seti_abcd#490] v2 = trueif ugt v1
[Op2urm_noflags_abcd#4b6] v3 = bint.i32 v2
[Op1ret#c3] return v3
}
-function %live_across_ebb_backwards(i32) -> i32 {
- ebb0(v0: i32):
- [Op1jmpb#eb] jump ebb2
- ebb1:
+function %live_across_block_backwards(i32) -> i32 {
+ block0(v0: i32):
+ [Op1jmpb#eb] jump block2
+ block1:
[Op2seti_abcd#490] v2 = trueif ugt v1
[Op2urm_noflags_abcd#4b6] v3 = bint.i32 v2
[Op1ret#c3] return v3
- ebb2:
+ block2:
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
- [Op1jmpb#eb] jump ebb1
+ [Op1jmpb#eb] jump block1
}
; Flags live into loop.
function %live_into_loop(i32) -> i32 {
- ebb0(v0: i32):
+ block0(v0: i32):
[DynRexOp1rcmp#39] v1 = ifcmp v0, v0
- [Op1jmpb#eb] jump ebb1
- ebb1:
+ [Op1jmpb#eb] jump block1
+ block1:
[Op2seti_abcd#490] v2 = trueif ugt v1
- [Op1jmpb#eb] jump ebb1
+ [Op1jmpb#eb] jump block1
}
diff --git a/cranelift/filetests/filetests/verifier/globals.clif b/cranelift/filetests/filetests/verifier/globals.clif
index 4882cae2ee..1a44cf8001 100644
--- a/cranelift/filetests/filetests/verifier/globals.clif
+++ b/cranelift/filetests/filetests/verifier/globals.clif
@@ -6,14 +6,14 @@ function %load_base_type(i64 vmctx) {
gv1 = load.i32 notrap aligned gv0
gv2 = load.i32 notrap aligned gv1 ; error: base gv1 has type i32, which is not the pointer type i64
-ebb0(v0: i64):
+block0(v0: i64):
return
}
function %global_value_wrong_type(i64 vmctx) {
gv0 = vmctx
-ebb0(v0: i64):
+block0(v0: i64):
v1 = global_value.i32 gv0 ; error: global_value instruction with type i32 references global value with type i64
return
}
diff --git a/cranelift/filetests/filetests/verifier/heap.clif b/cranelift/filetests/filetests/verifier/heap.clif
index 2c73b726ba..ffd6bb7ac4 100644
--- a/cranelift/filetests/filetests/verifier/heap.clif
+++ b/cranelift/filetests/filetests/verifier/heap.clif
@@ -6,7 +6,7 @@ function %heap_base_type(i64 vmctx) {
gv1 = load.i32 notrap aligned gv0
heap0 = static gv1, offset_guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -14,7 +14,7 @@ function %invalid_base(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv1, bound gv0, offset_guard 0x1000, index_type i64 ; error: invalid base global value gv1
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -22,7 +22,7 @@ function %invalid_bound(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i64 ; error: invalid bound global value gv1
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -31,7 +31,7 @@ function %heap_bound_type(i64 vmctx) {
gv1 = load.i16 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -39,7 +39,7 @@ function %heap_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx
heap0 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
-ebb0(v0: i64, v1: i64):
+block0(v0: i64, v1: i64):
v2 = heap_addr.i64 heap0, v1, 0; error: index type i64 differs from heap index type i32
return
}
diff --git a/cranelift/filetests/filetests/verifier/jump_table.clif b/cranelift/filetests/filetests/verifier/jump_table.clif
index bafd751f2e..67cd935320 100644
--- a/cranelift/filetests/filetests/verifier/jump_table.clif
+++ b/cranelift/filetests/filetests/verifier/jump_table.clif
@@ -1,19 +1,19 @@
test verifier
function %br_invalid_default(i64) {
- jt0 = jump_table [ebb1, ebb1]
+ jt0 = jump_table [block1, block1]
-ebb0(v0: i64):
- br_table.i64 v0, ebb2, jt0 ; error: invalid ebb reference ebb2
-ebb1:
+block0(v0: i64):
+ br_table.i64 v0, block2, jt0 ; error: invalid block reference block2
+block1:
return
}
function %br(i64) {
- jt0 = jump_table [ebb1, ebb2] ; error: invalid ebb reference ebb2
+ jt0 = jump_table [block1, block2] ; error: invalid block reference block2
-ebb0(v0: i64):
- br_table.i64 v0, ebb1, jt0
-ebb1:
+block0(v0: i64):
+ br_table.i64 v0, block1, jt0
+block1:
return
}
diff --git a/cranelift/filetests/filetests/verifier/memory.clif b/cranelift/filetests/filetests/verifier/memory.clif
index cbaddcb13b..496b71c815 100644
--- a/cranelift/filetests/filetests/verifier/memory.clif
+++ b/cranelift/filetests/filetests/verifier/memory.clif
@@ -4,13 +4,13 @@ function %cycle() {
gv0 = load.i32 notrap aligned gv1 ; error: global value cycle: [gv0, gv1]
gv1 = load.i32 notrap aligned gv0-32
-ebb1:
+block1:
return
}
function %self_cycle() {
gv0 = load.i32 notrap aligned gv0 ; error: global value cycle: [gv0]
-ebb1:
+block1:
return
}
diff --git a/cranelift/filetests/filetests/verifier/scalar-to-vector.clif b/cranelift/filetests/filetests/verifier/scalar-to-vector.clif
index 927abdafc2..1d04db9957 100644
--- a/cranelift/filetests/filetests/verifier/scalar-to-vector.clif
+++ b/cranelift/filetests/filetests/verifier/scalar-to-vector.clif
@@ -3,7 +3,7 @@ set enable_simd=true
target x86_64
function %scalar_to_vector() {
-ebb0:
+block0:
v0 = iconst.i32 42
v1 = scalar_to_vector.f32x4 v0 ; error: arg 0 (v0) has type i32, expected f32
return
diff --git a/cranelift/filetests/filetests/verifier/simd-lane-index.clif b/cranelift/filetests/filetests/verifier/simd-lane-index.clif
index 064254c0e2..2f7ca8d095 100644
--- a/cranelift/filetests/filetests/verifier/simd-lane-index.clif
+++ b/cranelift/filetests/filetests/verifier/simd-lane-index.clif
@@ -3,7 +3,7 @@ set enable_simd
target x86_64
function %insertlane_i32x4() {
-ebb0:
+block0:
v0 = vconst.i32x4 [0 0 0 0]
v1 = iconst.i32 42
v2 = insertlane v0, 4, v1 ; error: The lane 4 does not index into the type i32x4
return
}
@@ -11,7 +11,7 @@ ebb0:
function %insertlane_b16x8() {
-ebb0:
+block0:
v0 = vconst.b16x8 [false false false false false false false false]
v1 = bconst.b16 true
v2 = insertlane v0, 8, v1 ; error: The lane 8 does not index into the type b16x8
return
}
@@ -19,7 +19,7 @@ ebb0:
function %insertlane_f64x2() {
-ebb0:
+block0:
v0 = vconst.f64x2 0x00
v1 = f64const 0x0.1
v2 = insertlane v0, 2, v1 ; error: The lane 2 does not index into the type f64x2
return
}
@@ -27,14 +27,14 @@ ebb0:
function %extractlane_i32x4() {
-ebb0:
+block0:
v0 = vconst.i32x4 [0 0 0 0]
v1 = extractlane v0, 4 ; error: The lane 4 does not index into the type i32x4
return
}
function %extractlane_b8x16() {
-ebb0:
+block0:
v0 = vconst.b8x16 0x00
v1 = extractlane v0, 16 ; error: The lane 16 does not index into the type b8x16
return
diff --git a/cranelift/filetests/filetests/verifier/table.clif b/cranelift/filetests/filetests/verifier/table.clif
index 7502e00044..204ae5c93a 100644
--- a/cranelift/filetests/filetests/verifier/table.clif
+++ b/cranelift/filetests/filetests/verifier/table.clif
@@ -6,7 +6,7 @@ function %table_base_type(i64 vmctx) {
gv1 = load.i32 notrap aligned gv0
table0 = dynamic gv1, element_size 1, bound gv1, index_type i32 ; error: table base has type i32, which is not the pointer type i64
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -14,7 +14,7 @@ function %invalid_base(i64 vmctx) {
gv0 = vmctx
table0 = dynamic gv1, bound gv0, element_size 1, index_type i64 ; error: invalid base global value gv1
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -22,7 +22,7 @@ function %invalid_bound(i64 vmctx) {
gv0 = vmctx
table0 = dynamic gv0, bound gv1, element_size 1, index_type i64 ; error: invalid bound global value gv1
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -31,7 +31,7 @@ function %table_bound_type(i64 vmctx) {
gv1 = load.i16 notrap aligned gv0
table0 = dynamic gv0, bound gv1, element_size 1, index_type i32 ; error: table index type i32 differs from the type of its bound, i16
-ebb0(v0: i64):
+block0(v0: i64):
return
}
@@ -40,7 +40,7 @@ function %table_addr_index_type(i64 vmctx, i64) {
gv1 = load.i32 notrap aligned gv0
table0 = dynamic gv0, element_size 1, bound gv1, index_type i32
-ebb0(v0: i64, v1: i64):
+block0(v0: i64, v1: i64):
v2 = table_addr.i64 table0, v1, +0; error: index type i64 differs from table index type i32
return
}
diff --git a/cranelift/filetests/filetests/verifier/type_check.clif b/cranelift/filetests/filetests/verifier/type_check.clif
index ce61b81a10..c708ca76ad 100644
--- a/cranelift/filetests/filetests/verifier/type_check.clif
+++ b/cranelift/filetests/filetests/verifier/type_check.clif
@@ -1,40 +1,40 @@
test verifier
function %entry_block_signature_mismatch(i32) {
- ebb0: ; error: entry block parameters (0) must match function signature (1)
+ block0: ; error: entry block parameters (0) must match function signature (1)
return
}
function %entry_block_arg_type(i32) {
- ebb0(v0: f32): ; error: entry block parameter 0 expected to have type i32, got f32
+ block0(v0: f32): ; error: entry block parameter 0 expected to have type i32, got f32
return
}
function %incorrect_arg_type(i32, b1) -> i32 {
- ebb0(v0: i32, v1: b1):
+ block0(v0: i32, v1: b1):
v2 = iadd v0, v1 ; error: arg 1 (v1) has type b1, expected i32
return v2
}
function %incorrect_return_type() -> f32 {
- ebb0:
+ block0:
v0 = iconst.i32 1
return v0 ; error: arg 0 (v0) has type i32, must match function signature of f32
}
function %too_many_return_values() {
- ebb0:
+ block0:
v0 = iconst.i32 1
return v0 ; error: arguments of return must match function signature
}
function %too_few_return_values() -> f32, i64 {
- ebb0:
+ block0:
return ; error: arguments of return must match function signature
}
function %type_mismatch_controlling_variable() {
- ebb0:
+ block0:
v0 = iconst.i32 5
v1 = iconst.i64 6
v2 = iadd v0, v1 ; error: arg 1 (v1) has type i64, expected i32
@@ -43,14 +43,14 @@ function %type_mismatch_controlling_variable() {
function %fn_call_too_few_args() {
fn2 = %great_fn(i32, f32)
- ebb0:
+ block0:
call fn2() ; error: mismatched argument count for `call fn2()`: got 0, expected 2
return
}
function %fn_call_too_many_args() {
fn5 = %best_fn()
- ebb0:
+ block0:
v0 = iconst.i64 56
v1 = f32const 0.0
call fn5(v0, v1) ; error: mismatched argument count for `call fn5(v0, v1)`: got 2, expected 0
@@ -59,56 +59,56 @@ function %fn_call_too_many_args() {
function %fn_call_incorrect_arg_type(i64) {
sig9 = (f32)
- ebb0(v0: i64):
+ block0(v0: i64):
v1 = iconst.i32 56
call_indirect sig9, v0(v1) ; error: arg 0 (v1) has type i32, expected f32
return
}
-; TODO: Should we instead just verify that jump tables contain no EBBs that take arguments? This
+; TODO: Should we instead just verify that jump tables contain no blocks that take arguments? This
; error doesn't occur if no instruction uses the jump table.
function %jump_table_args() {
- jt1 = jump_table [ebb1]
- ebb0:
+ jt1 = jump_table [block1]
+ block0:
v0 = iconst.i32 0
- br_table v0, ebb2, jt1 ; error: takes no arguments, but had target ebb1 with 1 arguments
+ br_table v0, block2, jt1 ; error: takes no arguments, but had target block1 with 1 arguments
- ebb1(v5: i32):
+ block1(v5: i32):
return
- ebb2:
+ block2:
return
}
function %jump_args() {
- ebb0:
+ block0:
v0 = iconst.i16 10
v3 = iconst.i64 20
- jump ebb1(v0, v3) ; error: arg 0 (v0) has type i16, expected i64
+ jump block1(v0, v3) ; error: arg 0 (v0) has type i16, expected i64
; error: arg 1 (v3) has type i64, expected i16
- ebb1(v10: i64, v11: i16):
+ block1(v10: i64, v11: i16):
return
}
function %jump_args2() {
- ebb0:
+ block0:
v0 = iconst.i16 10
v3 = iconst.i64 20
- brz v0, ebb1(v0, v3) ; error: arg 0 (v0) has type i16, expected i64
+ brz v0, block1(v0, v3) ; error: arg 0 (v0) has type i16, expected i64
; error: arg 1 (v3) has type i64, expected i16
- jump ebb1(v3, v0)
- ebb1(v10: i64, v11: i16):
+ jump block1(v3, v0)
+ block1(v10: i64, v11: i16):
return
}
function %bad_extend() {
-ebb0:
+block0:
v0 = iconst.i32 10
v1 = uextend.i16 v0 ; error: input i32 must be smaller than output i16
return
}
function %bad_reduce() {
-ebb0:
+block0:
v0 = iconst.i32 10
v1 = ireduce.i64 v0 ; error: input i32 must be larger than output i64
return
diff --git a/cranelift/filetests/filetests/verifier/undeclared_vmctx.clif b/cranelift/filetests/filetests/verifier/undeclared_vmctx.clif
index 1b760b3ff3..a48e7a0ef6 100644
--- a/cranelift/filetests/filetests/verifier/undeclared_vmctx.clif
+++ b/cranelift/filetests/filetests/verifier/undeclared_vmctx.clif
@@ -3,7 +3,7 @@ test verifier
; Using a vmctx global value without declaring it first leads to an error.
function %vmglobal_err(i64) -> i64 {
gv4 = vmctx ; error: undeclared vmctx reference
-ebb0(v0: i64):
+block0(v0: i64):
v1 = global_value.i64 gv4
return v1
}
@@ -11,7 +11,7 @@ ebb0(v0: i64):
; If it is declared, all is fine.
function %vmglobal_ok(i64 vmctx) -> i64 {
gv4 = vmctx
-ebb0(v0: i64):
+block0(v0: i64):
v1 = global_value.i64 gv4
return v1
}
diff --git a/cranelift/filetests/filetests/verifier/unreachable_code.clif b/cranelift/filetests/filetests/verifier/unreachable_code.clif
index 7ea7dd49b0..0a12aac8d0 100644
--- a/cranelift/filetests/filetests/verifier/unreachable_code.clif
+++ b/cranelift/filetests/filetests/verifier/unreachable_code.clif
@@ -1,22 +1,22 @@
test verifier
function %test() -> i32 { ; Ok
-ebb0:
+block0:
v0 = iconst.i32 0
v1 = iconst.i32 0
- jump ebb2
+ jump block2
-ebb2:
- jump ebb4
+block2:
+ jump block4
-ebb4:
- jump ebb2
+block4:
+ jump block2
-ebb3(v2: i32):
+block3(v2: i32):
v4 = iadd.i32 v1, v2
- jump ebb9(v4)
+ jump block9(v4)
-ebb9(v7: i32):
+block9(v7: i32):
v9 = iadd.i32 v2, v7
return v9
@@ -24,22 +24,22 @@ ebb9(v7: i32):
; Using a function argument in an unreachable block is ok.
function %arg(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iadd_imm v0, 1
return v1
-ebb1:
+block1:
v10 = iadd_imm v0, 10
return v10
}
-; Using an EBB argument from an unreachable block is not ok.
+; Using a block argument from an unreachable block is not ok.
function %arg2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iadd v0, v10 ; error: uses value arg from non-dominating
return v1
-ebb1(v10: i32):
+block1(v10: i32):
v11 = iadd v0, v10
return v11
}
diff --git a/cranelift/filetests/filetests/wasm/control.clif b/cranelift/filetests/filetests/wasm/control.clif
index 66c82df937..d00c5b3166 100644
--- a/cranelift/filetests/filetests/wasm/control.clif
+++ b/cranelift/filetests/filetests/wasm/control.clif
@@ -6,60 +6,60 @@ target i686 haswell
target x86_64 haswell
function %br_if(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 1
- brz v0, ebb1(v1)
- jump ebb2
+ brz v0, block1(v1)
+ jump block2
-ebb1(v2: i32):
+block1(v2: i32):
return v2
-ebb2:
- jump ebb1(v0)
+block2:
+ jump block1(v0)
}
function %br_if_not(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 1
- brnz v0, ebb1(v0)
- jump ebb2
+ brnz v0, block1(v0)
+ jump block2
-ebb1(v2: i32):
+block1(v2: i32):
return v2
-ebb2:
- jump ebb1(v0)
+block2:
+ jump block1(v0)
}
function %br_if_fallthrough(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = iconst.i32 1
- brz v0, ebb1(v1)
+ brz v0, block1(v1)
; This jump gets converted to a fallthrough.
- jump ebb1(v0)
+ jump block1(v0)
-ebb1(v2: i32):
+block1(v2: i32):
return v2
}
function %undefined() {
-ebb0:
+block0:
trap user0
}
function %br_table(i32) {
-jt0 = jump_table [ebb3, ebb1, ebb2]
+jt0 = jump_table [block3, block1, block2]
-ebb0(v0: i32):
- br_table v0, ebb4, jt0
+block0(v0: i32):
+ br_table v0, block4, jt0
-ebb4:
+block4:
trap oob
-ebb1:
+block1:
return
-ebb2:
+block2:
return
-ebb3:
+block3:
return
}
diff --git a/cranelift/filetests/filetests/wasm/conversions.clif b/cranelift/filetests/filetests/wasm/conversions.clif
index 6784637136..33602166b4 100644
--- a/cranelift/filetests/filetests/wasm/conversions.clif
+++ b/cranelift/filetests/filetests/wasm/conversions.clif
@@ -4,199 +4,199 @@ test compile
target x86_64 haswell
function %i32_wrap_i64(i64) -> i32 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = ireduce.i32 v0
return v1
}
function %i64_extend_s_i32(i32) -> i64 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = sextend.i64 v0
return v1
}
function %i64_extend_u_i32(i32) -> i64 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = uextend.i64 v0
return v1
}
function %i32_trunc_s_f32(f32) -> i32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_sint.i32 v0
return v1
}
function %i32_trunc_u_f32(f32) -> i32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_uint.i32 v0
return v1
}
function %i32_trunc_s_f64(f64) -> i32 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_sint.i32 v0
return v1
}
function %i32_trunc_u_f64(f64) -> i32 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_uint.i32 v0
return v1
}
function %i64_trunc_s_f32(f32) -> i64 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_sint.i64 v0
return v1
}
function %i64_trunc_u_f32(f32) -> i64 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_uint.i64 v0
return v1
}
function %i64_trunc_s_f64(f64) -> i64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_sint.i64 v0
return v1
}
function %i64_trunc_u_f64(f64) -> i64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_uint.i64 v0
return v1
}
function %i32_trunc_s_sat_f32(f32) -> i32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
function %i32_trunc_u_sat_f32(f32) -> i32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
function %i32_trunc_s_sat_f64(f64) -> i32 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
function %i32_trunc_u_sat_f64(f64) -> i32 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
function %i64_trunc_s_sat_f32(f32) -> i64 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
function %i64_trunc_u_sat_f32(f32) -> i64 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
function %i64_trunc_s_sat_f64(f64) -> i64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
function %i64_trunc_u_sat_f64(f64) -> i64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
function %f32_trunc_f64(f64) -> f32 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fdemote.f32 v0
return v1
}
function %f64_promote_f32(f32) -> f64 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fpromote.f64 v0
return v1
}
function %f32_convert_s_i32(i32) -> f32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = fcvt_from_sint.f32 v0
return v1
}
function %f32_convert_u_i32(i32) -> f32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = fcvt_from_uint.f32 v0
return v1
}
function %f64_convert_s_i32(i32) -> f64 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = fcvt_from_sint.f64 v0
return v1
}
function %f64_convert_u_i32(i32) -> f64 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = fcvt_from_uint.f64 v0
return v1
}
function %f32_convert_s_i64(i64) -> f32 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = fcvt_from_sint.f32 v0
return v1
}
function %f32_convert_u_i64(i64) -> f32 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = fcvt_from_uint.f32 v0
return v1
}
function %f64_convert_s_i64(i64) -> f64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = fcvt_from_sint.f64 v0
return v1
}
function %f64_convert_u_i64(i64) -> f64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = fcvt_from_uint.f64 v0
return v1
}
function %i32_reinterpret_f32(f32) -> i32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = bitcast.i32 v0
return v1
}
function %f32_reinterpret_i32(i32) -> f32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = bitcast.f32 v0
return v1
}
function %i64_reinterpret_f64(f64) -> i64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = bitcast.i64 v0
return v1
}
function %f64_reinterpret_i64(i64) -> f64 {
-ebb0(v0: i64):
+block0(v0: i64):
v1 = bitcast.f64 v0
return v1
}
diff --git a/cranelift/filetests/filetests/wasm/f32-arith.clif b/cranelift/filetests/filetests/wasm/f32-arith.clif
index c439d2e798..b7a83f5434 100644
--- a/cranelift/filetests/filetests/wasm/f32-arith.clif
+++ b/cranelift/filetests/filetests/wasm/f32-arith.clif
@@ -9,7 +9,7 @@ target x86_64 baseline
; Constants.
function %f32_const() -> f32 {
-ebb0:
+block0:
v1 = f32const 0x3.0
return v1
}
@@ -17,43 +17,43 @@ ebb0:
; Unary operations
function %f32_abs(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fabs v0
return v1
}
function %f32_neg(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = fneg v0
return v1
}
function %f32_sqrt(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = sqrt v0
return v1
}
function %f32_ceil(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = ceil v0
return v1
}
function %f32_floor(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = floor v0
return v1
}
function %f32_trunc(f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = trunc v0
return v1
}
function %f32_nearest (f32) -> f32 {
-ebb0(v0: f32):
+block0(v0: f32):
v1 = nearest v0
return v1
}
@@ -61,43 +61,43 @@ ebb0(v0: f32):
; Binary Operations
function %f32_add(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fadd v0, v1
return v2
}
function %f32_sub(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fsub v0, v1
return v2
}
function %f32_mul(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fmul v0, v1
return v2
}
function %f32_div(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fdiv v0, v1
return v2
}
function %f32_min(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
}
function %f32_max(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fmax v0, v1
return v2
}
function %f32_copysign(f32, f32) -> f32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcopysign v0, v1
return v2
}
diff --git a/cranelift/filetests/filetests/wasm/f32-compares.clif b/cranelift/filetests/filetests/wasm/f32-compares.clif
index e314646b1c..e569a94821 100644
--- a/cranelift/filetests/filetests/wasm/f32-compares.clif
+++ b/cranelift/filetests/filetests/wasm/f32-compares.clif
@@ -6,42 +6,42 @@ target i686 haswell
target x86_64 haswell
function %f32_eq(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp eq v0, v1
v3 = bint.i32 v2
return v3
}
function %f32_ne(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp ne v0, v1
v3 = bint.i32 v2
return v3
}
function %f32_lt(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp lt v0, v1
v3 = bint.i32 v2
return v3
}
function %f32_gt(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp gt v0, v1
v3 = bint.i32 v2
return v3
}
function %f32_le(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp le v0, v1
v3 = bint.i32 v2
return v3
}
function %f32_ge(f32, f32) -> i32 {
-ebb0(v0: f32, v1: f32):
+block0(v0: f32, v1: f32):
v2 = fcmp ge v0, v1
v3 = bint.i32 v2
return v3
diff --git a/cranelift/filetests/filetests/wasm/f32-memory64.clif b/cranelift/filetests/filetests/wasm/f32-memory64.clif
index edc5c22780..33e3100537 100644
--- a/cranelift/filetests/filetests/wasm/f32-memory64.clif
+++ b/cranelift/filetests/filetests/wasm/f32-memory64.clif
@@ -9,7 +9,7 @@ function %f32_load(i32, i64 vmctx) -> f32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
-ebb0(v0: i32, v1: i64):
+block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
v3 = load.f32 v2
return v3
@@ -19,7 +19,7 @@ function %f32_store(f32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
-ebb0(v0: f32, v1: i32, v2: i64):
+block0(v0: f32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
store v0, v3
return
diff --git a/cranelift/filetests/filetests/wasm/f64-arith.clif b/cranelift/filetests/filetests/wasm/f64-arith.clif
index 7209ed6024..cecd954f90 100644
--- a/cranelift/filetests/filetests/wasm/f64-arith.clif
+++ b/cranelift/filetests/filetests/wasm/f64-arith.clif
@@ -7,7 +7,7 @@ target x86_64 baseline
; Constants.
function %f64_const() -> f64 {
-ebb0:
+block0:
v1 = f64const 0x3.0
return v1
}
@@ -15,43 +15,43 @@ ebb0:
; Unary operations
function %f64_abs(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fabs v0
return v1
}
function %f64_neg(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = fneg v0
return v1
}
function %f64_sqrt(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = sqrt v0
return v1
}
function %f64_ceil(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = ceil v0
return v1
}
function %f64_floor(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = floor v0
return v1
}
function %f64_trunc(f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = trunc v0
return v1
}
function %f64_nearest (f64) -> f64 {
-ebb0(v0: f64):
+block0(v0: f64):
v1 = nearest v0
return v1
}
@@ -59,43 +59,43 @@ ebb0(v0: f64):
; Binary Operations
function %f64_add(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fadd v0, v1
return v2
}
function %f64_sub(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fsub v0, v1
return v2
}
function %f64_mul(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fmul v0, v1
return v2
}
function %f64_div(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fdiv v0, v1
return v2
}
function %f64_min(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fmin v0, v1
return v2
}
function %f64_max(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fmax v0, v1
return v2
}
function %f64_copysign(f64, f64) -> f64 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcopysign v0, v1
return v2
}
diff --git a/cranelift/filetests/filetests/wasm/f64-compares.clif b/cranelift/filetests/filetests/wasm/f64-compares.clif
index 5d51ebb443..b75a7634bf 100644
--- a/cranelift/filetests/filetests/wasm/f64-compares.clif
+++ b/cranelift/filetests/filetests/wasm/f64-compares.clif
@@ -6,42 +6,42 @@ target i686 haswell
target x86_64 haswell
function %f64_eq(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp eq v0, v1
v3 = bint.i32 v2
return v3
}
function %f64_ne(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp ne v0, v1
v3 = bint.i32 v2
return v3
}
function %f64_lt(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp lt v0, v1
v3 = bint.i32 v2
return v3
}
function %f64_gt(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp gt v0, v1
v3 = bint.i32 v2
return v3
}
function %f64_le(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp le v0, v1
v3 = bint.i32 v2
return v3
}
function %f64_ge(f64, f64) -> i32 {
-ebb0(v0: f64, v1: f64):
+block0(v0: f64, v1: f64):
v2 = fcmp ge v0, v1
v3 = bint.i32 v2
return v3
diff --git a/cranelift/filetests/filetests/wasm/f64-memory64.clif b/cranelift/filetests/filetests/wasm/f64-memory64.clif
index 85351d9d8d..c0a58de4a1 100644
--- a/cranelift/filetests/filetests/wasm/f64-memory64.clif
+++ b/cranelift/filetests/filetests/wasm/f64-memory64.clif
@@ -9,7 +9,7 @@ function %f64_load(i32, i64 vmctx) -> f64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
-ebb0(v0: i32, v1: i64):
+block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
v3 = load.f64 v2
return v3
@@ -19,7 +19,7 @@ function %f64_store(f64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
-ebb0(v0: f64, v1: i32, v2: i64):
+block0(v0: f64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
store v0, v3
return
diff --git a/cranelift/filetests/filetests/wasm/i32-arith.clif b/cranelift/filetests/filetests/wasm/i32-arith.clif
index 8c92613150..cb9597741b 100644
--- a/cranelift/filetests/filetests/wasm/i32-arith.clif
+++ b/cranelift/filetests/filetests/wasm/i32-arith.clif
@@ -9,7 +9,7 @@ target x86_64 baseline
; Constants.
function %i32_const() -> i32 {
-ebb0:
+block0:
v0 = iconst.i32 0x8765_4321
return v0
}
@@ -17,19 +17,19 @@ ebb0:
; Unary operations.
function %i32_clz(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = clz v0
return v1
}
function %i32_ctz(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = ctz v0
return v1
}
function %i32_popcnt(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = popcnt v0
return v1
}
@@ -37,91 +37,91 @@ ebb0(v0: i32):
; Binary operations.
function %i32_add(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = iadd v0, v1
return v2
}
function %i32_sub(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = isub v0, v1
return v2
}
function %i32_mul(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = imul v0, v1
return v2
}
function %i32_div_s(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = sdiv v0, v1
return v2
}
function %i32_div_u(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = udiv v0, v1
return v2
}
function %i32_rem_s(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = srem v0, v1
return v2
}
function %i32_rem_u(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = urem v0, v1
return v2
}
function %i32_and(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = band v0, v1
return v2
}
function %i32_or(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = bor v0, v1
return v2
}
function %i32_xor(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = bxor v0, v1
return v2
}
function %i32_shl(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = ishl v0, v1
return v2
}
function %i32_shr_s(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = sshr v0, v1
return v2
}
function %i32_shr_u(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = ushr v0, v1
return v2
}
function %i32_rotl(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = rotl v0, v1
return v2
}
function %i32_rotr(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
+block0(v0: i32, v1: i32):
v2 = rotr v0, v1
return v2
}
diff --git a/cranelift/filetests/filetests/wasm/i32-compares.clif b/cranelift/filetests/filetests/wasm/i32-compares.clif
index 1b43c5938a..f5be0a25c1 100644
--- a/cranelift/filetests/filetests/wasm/i32-compares.clif
+++ b/cranelift/filetests/filetests/wasm/i32-compares.clif
@@ -6,77 +6,77 @@ target i686 haswell
target x86_64 haswell
function %i32_eqz(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
v1 = icmp_imm eq v0, 0
v2 = bint.i32 v1
return v2
}
function %i32_eq(i32, i32) -> i32
{ -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp eq v0, v1 v3 = bint.i32 v2 return v3 } function %i32_ne(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp ne v0, v1 v3 = bint.i32 v2 return v3 } function %i32_lt_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp slt v0, v1 v3 = bint.i32 v2 return v3 } function %i32_lt_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp ult v0, v1 v3 = bint.i32 v2 return v3 } function %i32_gt_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp sgt v0, v1 v3 = bint.i32 v2 return v3 } function %i32_gt_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp ugt v0, v1 v3 = bint.i32 v2 return v3 } function %i32_le_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp sle v0, v1 v3 = bint.i32 v2 return v3 } function %i32_le_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp ule v0, v1 v3 = bint.i32 v2 return v3 } function %i32_ge_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp sge v0, v1 v3 = bint.i32 v2 return v3 } function %i32_ge_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = icmp uge v0, v1 v3 = bint.i32 v2 return v3 diff --git a/cranelift/filetests/filetests/wasm/i32-memory64.clif b/cranelift/filetests/filetests/wasm/i32-memory64.clif index 306c4e4aa5..b1418c5ed1 100644 --- a/cranelift/filetests/filetests/wasm/i32-memory64.clif +++ b/cranelift/filetests/filetests/wasm/i32-memory64.clif @@ -9,7 +9,7 @@ function %i32_load(i32, i64 vmctx) -> i32 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = load.i32 v2 return v3 @@ -19,7 +19,7 @@ function %i32_store(i32, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i32, v2: i64): +block0(v0: i32, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 store v0, v3 return @@ -29,7 +29,7 @@ function %i32_load8_s(i32, i64 vmctx) -> i32 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = sload8.i32 v2 return v3 @@ -39,7 +39,7 @@ function %i32_load8_u(i32, i64 vmctx) -> i32 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = uload8.i32 v2 return v3 @@ -49,7 +49,7 @@ function %i32_store8(i32, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i32, v2: i64): +block0(v0: i32, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 istore8 v0, v3 return @@ -59,7 +59,7 @@ function %i32_load16_s(i32, i64 vmctx) -> i32 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = sload16.i32 v2 return v3 @@ -69,7 +69,7 @@ function %i32_load16_u(i32, i64 vmctx) -> i32 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = 
uload16.i32 v2 return v3 @@ -79,7 +79,7 @@ function %i32_store16(i32, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i32, v2: i64): +block0(v0: i32, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 istore16 v0, v3 return diff --git a/cranelift/filetests/filetests/wasm/i64-arith.clif b/cranelift/filetests/filetests/wasm/i64-arith.clif index c4cd4a4507..b457f9942d 100644 --- a/cranelift/filetests/filetests/wasm/i64-arith.clif +++ b/cranelift/filetests/filetests/wasm/i64-arith.clif @@ -7,7 +7,7 @@ target x86_64 baseline ; Constants. function %i64_const() -> i64 { -ebb0: +block0: v0 = iconst.i64 0x8765_4321 return v0 } @@ -15,19 +15,19 @@ ebb0: ; Unary operations. function %i64_clz(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = clz v0 return v1 } function %i64_ctz(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = ctz v0 return v1 } function %i64_popcnt(i64) -> i64 { -ebb0(v0: i64): +block0(v0: i64): v1 = popcnt v0 return v1 } @@ -35,91 +35,91 @@ ebb0(v0: i64): ; Binary operations. function %i64_add(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = iadd v0, v1 return v2 } function %i64_sub(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = isub v0, v1 return v2 } function %i64_mul(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = imul v0, v1 return v2 } function %i32_div_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = sdiv v0, v1 return v2 } function %i32_div_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = udiv v0, v1 return v2 } function %i32_rem_s(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = srem v0, v1 return v2 } function %i32_rem_u(i32, i32) -> i32 { -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = urem v0, v1 return v2 } function %i64_and(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = band v0, v1 return v2 } function %i64_or(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = bor v0, v1 return v2 } function %i64_xor(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = bxor v0, v1 return v2 } function %i64_shl(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = ishl v0, v1 return v2 } function %i64_shr_s(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = sshr v0, v1 return v2 } function %i64_shr_u(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = ushr v0, v1 return v2 } function %i64_rotl(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = rotl v0, v1 return v2 } function %i64_rotr(i64, i64) -> i64 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = rotr v0, v1 return v2 } diff --git a/cranelift/filetests/filetests/wasm/i64-compares.clif b/cranelift/filetests/filetests/wasm/i64-compares.clif index e137b7e19c..2863efb6c3 100644 --- a/cranelift/filetests/filetests/wasm/i64-compares.clif +++ b/cranelift/filetests/filetests/wasm/i64-compares.clif @@ -4,77 +4,77 @@ test compile target x86_64 haswell function %i64_eqz(i64) -> i32 { -ebb0(v0: i64): +block0(v0: i64): v1 = icmp_imm eq v0, 0 v2 = bint.i32 v1 return v2 } function %i64_eq(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 v3 = bint.i32 v2 return v3 } function %i64_ne(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp 
ne v0, v1 v3 = bint.i32 v2 return v3 } function %i64_lt_s(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp slt v0, v1 v3 = bint.i32 v2 return v3 } function %i64_lt_u(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp ult v0, v1 v3 = bint.i32 v2 return v3 } function %i64_gt_s(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp sgt v0, v1 v3 = bint.i32 v2 return v3 } function %i64_gt_u(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp ugt v0, v1 v3 = bint.i32 v2 return v3 } function %i64_le_s(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp sle v0, v1 v3 = bint.i32 v2 return v3 } function %i64_le_u(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp ule v0, v1 v3 = bint.i32 v2 return v3 } function %i64_ge_s(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp sge v0, v1 v3 = bint.i32 v2 return v3 } function %i64_ge_u(i64, i64) -> i32 { -ebb0(v0: i64, v1: i64): +block0(v0: i64, v1: i64): v2 = icmp uge v0, v1 v3 = bint.i32 v2 return v3 diff --git a/cranelift/filetests/filetests/wasm/i64-memory64.clif b/cranelift/filetests/filetests/wasm/i64-memory64.clif index edea6da503..f2b34fc8b0 100644 --- a/cranelift/filetests/filetests/wasm/i64-memory64.clif +++ b/cranelift/filetests/filetests/wasm/i64-memory64.clif @@ -9,7 +9,7 @@ function %i64_load(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = load.i64 v2 return v3 @@ -19,7 +19,7 @@ function %i64_store(i64, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i64, v1: i32, v2: i64): +block0(v0: i64, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 store v0, v3 return @@ -29,7 +29,7 @@ function %i64_load8_s(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = sload8.i64 v2 return v3 @@ -39,7 +39,7 @@ function %i64_load8_u(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = uload8.i64 v2 return v3 @@ -49,7 +49,7 @@ function %i64_store8(i64, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i64, v1: i32, v2: i64): +block0(v0: i64, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 istore8 v0, v3 return @@ -59,7 +59,7 @@ function %i64_load16_s(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = sload16.i64 v2 return v3 @@ -69,7 +69,7 @@ function %i64_load16_u(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = uload16.i64 v2 return v3 @@ -79,7 +79,7 @@ function %i64_store16(i64, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i64, v1: 
i32, v2: i64): +block0(v0: i64, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 istore16 v0, v3 return @@ -89,7 +89,7 @@ function %i64_load32_s(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = sload32.i64 v2 return v3 @@ -99,7 +99,7 @@ function %i64_load32_u(i32, i64 vmctx) -> i64 { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = heap_addr.i64 heap0, v0, 1 v3 = uload32.i64 v2 return v3 @@ -109,7 +109,7 @@ function %i64_store32(i64, i32, i64 vmctx) { gv0 = vmctx heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000 -ebb0(v0: i64, v1: i32, v2: i64): +block0(v0: i64, v1: i32, v2: i64): v3 = heap_addr.i64 heap0, v1, 1 istore32 v0, v3 return diff --git a/cranelift/filetests/filetests/wasm/multi-val-b1.clif b/cranelift/filetests/filetests/wasm/multi-val-b1.clif index 582403dcfb..7a4d4d02b0 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-b1.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-b1.clif @@ -7,8 +7,8 @@ target x86_64 haswell function %return_4_b1s(b1, b1, b1, b1) -> b1, b1, b1, b1 { ;; check: function %return_4_b1s(b1 [%rsi], b1 [%rdx], b1 [%rcx], b1 [%r8], i64 sret [%rdi], i64 fp [%rbp]) -> i64 sret [%rax], i64 fp [%rbp] fast { -ebb0(v0: b1, v1: b1, v2: b1, v3: b1): -; check: ebb0(v0: b1 [%rsi], v1: b1 [%rdx], v2: b1 [%rcx], v3: b1 [%r8], v4: i64 [%rdi], v13: i64 [%rbp]): +block0(v0: b1, v1: b1, v2: b1, v3: b1): +; check: block0(v0: b1 [%rsi], v1: b1 [%rdx], v2: b1 [%rcx], v3: b1 [%r8], v4: i64 [%rdi], v13: i64 [%rbp]): return v0, v1, v2, v3 ; check: v5 = bint.i8 v0 @@ -32,8 +32,8 @@ function %call_4_b1s() { fn0 = colocated %return_4_b1s(b1, b1, b1, b1) -> b1, b1, b1, b1 ; check: sig0 = (b1 [%rsi], b1 [%rdx], b1 [%rcx], b1 [%r8], i64 sret [%rdi]) -> i64 sret [%rax] fast -ebb0: -; check: ebb0(v26: i64 [%rbp], v27: i64 [%rbx]): +block0: +; check: block0(v26: i64 [%rbp], v27: i64 [%rbx]): v0 = bconst.b1 true v1 = bconst.b1 false diff --git a/cranelift/filetests/filetests/wasm/multi-val-call-indirect.clif b/cranelift/filetests/filetests/wasm/multi-val-call-indirect.clif index b102d652cf..6f5afd4700 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-call-indirect.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-call-indirect.clif @@ -9,7 +9,7 @@ function %call_indirect_many_rets(i64) { sig0 = () -> i64, i64, i64, i64 ; check: sig0 = (i64 sret [%rdi]) -> i64 sret [%rax] fast -ebb0(v0: i64): +block0(v0: i64): v1, v2, v3, v4 = call_indirect sig0, v0() ; check: v5 = stack_addr.i64 ss0 ; nextln: v6 = call_indirect sig0, v0(v5) diff --git a/cranelift/filetests/filetests/wasm/multi-val-f32.clif b/cranelift/filetests/filetests/wasm/multi-val-f32.clif index 9f3d0047cd..b69b71e047 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-f32.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-f32.clif @@ -4,14 +4,14 @@ target x86_64 haswell ;; Returning many f32s function %return_2_f32s() -> f32, f32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f32const 0x1.0 return v0, v1 } function %return_3_f32s() -> f32, f32, f32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f32const 0x1.0 v2 = f32const 0x2.0 @@ -19,7 +19,7 @@ ebb0: } function %return_4_f32s() -> f32, f32, f32, f32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f32const 0x1.0 v2 = f32const 0x2.0 @@ -33,7 +33,7 @@ function 
%call() -> f32 { fn0 = %a() -> f32, f32 fn1 = %b(f32, f32) -> f32, f32, f32 fn2 = %c(f32, f32, f32) -> f32, f32, f32, f32 -ebb0: +block0: v0, v1 = call fn0() v2, v3, v4 = call fn1(v0, v1) v5, v6, v7, v8 = call fn2(v2, v3, v4) diff --git a/cranelift/filetests/filetests/wasm/multi-val-f64.clif b/cranelift/filetests/filetests/wasm/multi-val-f64.clif index aa7e263eba..afb6585efc 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-f64.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-f64.clif @@ -4,14 +4,14 @@ target x86_64 haswell ;; Returning many f64s function %return_2_f64s() -> f64, f64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f64const 0x1.0 return v0, v1 } function %return_3_f64s() -> f64, f64, f64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f64const 0x1.0 v2 = f64const 0x2.0 @@ -19,7 +19,7 @@ ebb0: } function %return_4_f64s() -> f64, f64, f64, f64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f64const 0x1.0 v2 = f64const 0x2.0 @@ -33,7 +33,7 @@ function %call() -> f64 { fn0 = %a() -> f64, f64 fn1 = %b(f64, f64) -> f64, f64, f64 fn2 = %c(f64, f64, f64) -> f64, f64, f64, f64 -ebb0: +block0: v0, v1 = call fn0() v2, v3, v4 = call fn1(v0, v1) v5, v6, v7, v8 = call fn2(v2, v3, v4) diff --git a/cranelift/filetests/filetests/wasm/multi-val-i32.clif b/cranelift/filetests/filetests/wasm/multi-val-i32.clif index 924fcb4bc6..035cc2e332 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-i32.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-i32.clif @@ -4,14 +4,14 @@ target x86_64 haswell ;; Returning many i32s function %return_2_i32s() -> i32, i32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i32 1 return v0, v1 } function %return_3_i32s() -> i32, i32, i32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i32 1 v2 = iconst.i32 2 @@ -19,7 +19,7 @@ ebb0: } function %return_4_i32s() -> i32, i32, i32, i32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i32 1 v2 = iconst.i32 2 @@ -33,7 +33,7 @@ function %call() -> i32 { fn0 = %a() -> i32, i32 fn1 = %b(i32, i32) -> i32, i32, i32 fn2 = %c(i32, i32, i32) -> i32, i32, i32, i32 -ebb0: +block0: v0, v1 = call fn0() v2, v3, v4 = call fn1(v0, v1) v5, v6, v7, v8 = call fn2(v2, v3, v4) diff --git a/cranelift/filetests/filetests/wasm/multi-val-i64.clif b/cranelift/filetests/filetests/wasm/multi-val-i64.clif index f5ab392693..bacaf8240f 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-i64.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-i64.clif @@ -4,14 +4,14 @@ target x86_64 haswell ;; Returning many i64s function %return_2_i64s() -> i64, i64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i64 1 return v0, v1 } function %return_3_i64s() -> i64, i64, i64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i64 1 v2 = iconst.i64 2 @@ -19,7 +19,7 @@ ebb0: } function %return_4_i64s() -> i64, i64, i64, i64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i64 1 v2 = iconst.i64 2 @@ -33,7 +33,7 @@ function %call() -> i64 { fn0 = %a() -> i64, i64 fn1 = %b(i64, i64) -> i64, i64, i64 fn2 = %c(i64, i64, i64) -> i64, i64, i64, i64 -ebb0: +block0: v0, v1 = call fn0() v2, v3, v4 = call fn1(v0, v1) v5, v6, v7, v8 = call fn2(v2, v3, v4) diff --git a/cranelift/filetests/filetests/wasm/multi-val-mixed.clif b/cranelift/filetests/filetests/wasm/multi-val-mixed.clif index db66d202ff..e7289332c7 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-mixed.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-mixed.clif @@ -33,21 +33,21 @@ target x86_64 haswell ;; def make_returner(results): ;; results = list(results) ;; head = "function %return_" + 
"_".join(results) + "() -> " + ", ".join(results) + " {\n" -;; ebb = "ebb0:\n" +;; block = "block0:\n" ;; vals = [make_val(i, r) for i, r in enumerate(results)] ;; ret = " return " + ", ".join(("v" + str(i) for i in range(0, len(results)))) -;; return head + ebb + "\n".join(vals) + "\n" + ret + "\n}\n" +;; return head + block + "\n".join(vals) + "\n" + ret + "\n}\n" ;; ;; def make_caller(results): ;; results = list(results) ;; head = "function %call_" + "_".join(results) + "() {\n" ;; fn_decl = " fn0 = %foo() -> " + ",".join(results) + "\n" -;; ebb = "ebb0:\n" +;; block = "block0:\n" ;; ret_vars = ["v" + str(i) for i, r in enumerate(results)] ;; call = " " + ",".join(ret_vars) + " = call fn0()\n" ;; ret = " return\n" ;; tail = "}\n" -;; return head + fn_decl + ebb + call + ret + tail +;; return head + fn_decl + block + call + ret + tail ;; ;; for results in permutations(["i32", "i64", "f32", "f64", "b1"]): ;; print make_returner(results) @@ -58,7 +58,7 @@ target x86_64 haswell ;; regenerate the test. function %return_i32_i64_f32_f64_b1() -> i32, i64, f32, f64, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -69,13 +69,13 @@ ebb0: function %call_i32_i64_f32_f64_b1() { fn0 = %foo() -> i32,i64,f32,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_i64_f32_b1_f64() -> i32, i64, f32, b1, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -86,13 +86,13 @@ ebb0: function %call_i32_i64_f32_b1_f64() { fn0 = %foo() -> i32,i64,f32,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_i64_f64_f32_b1() -> i32, i64, f64, f32, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -103,13 +103,13 @@ ebb0: function %call_i32_i64_f64_f32_b1() { fn0 = %foo() -> i32,i64,f64,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_i64_f64_b1_f32() -> i32, i64, f64, b1, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -120,13 +120,13 @@ ebb0: function %call_i32_i64_f64_b1_f32() { fn0 = %foo() -> i32,i64,f64,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_i64_b1_f32_f64() -> i32, i64, b1, f32, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -137,13 +137,13 @@ ebb0: function %call_i32_i64_b1_f32_f64() { fn0 = %foo() -> i32,i64,b1,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_i64_b1_f64_f32() -> i32, i64, b1, f64, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -154,13 +154,13 @@ ebb0: function %call_i32_i64_b1_f64_f32() { fn0 = %foo() -> i32,i64,b1,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_i64_f64_b1() -> i32, f32, i64, f64, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -171,13 +171,13 @@ ebb0: function %call_i32_f32_i64_f64_b1() { fn0 = %foo() -> i32,f32,i64,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_i64_b1_f64() -> i32, f32, i64, b1, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -188,13 +188,13 @@ ebb0: function %call_i32_f32_i64_b1_f64() { fn0 = %foo() -> i32,f32,i64,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_f64_i64_b1() -> i32, f32, f64, i64, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -205,13 +205,13 @@ ebb0: function %call_i32_f32_f64_i64_b1() { 
fn0 = %foo() -> i32,f32,f64,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_f64_b1_i64() -> i32, f32, f64, b1, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -222,13 +222,13 @@ ebb0: function %call_i32_f32_f64_b1_i64() { fn0 = %foo() -> i32,f32,f64,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_b1_i64_f64() -> i32, f32, b1, i64, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -239,13 +239,13 @@ ebb0: function %call_i32_f32_b1_i64_f64() { fn0 = %foo() -> i32,f32,b1,i64,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f32_b1_f64_i64() -> i32, f32, b1, f64, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -256,13 +256,13 @@ ebb0: function %call_i32_f32_b1_f64_i64() { fn0 = %foo() -> i32,f32,b1,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_i64_f32_b1() -> i32, f64, i64, f32, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -273,13 +273,13 @@ ebb0: function %call_i32_f64_i64_f32_b1() { fn0 = %foo() -> i32,f64,i64,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_i64_b1_f32() -> i32, f64, i64, b1, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -290,13 +290,13 @@ ebb0: function %call_i32_f64_i64_b1_f32() { fn0 = %foo() -> i32,f64,i64,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_f32_i64_b1() -> i32, f64, f32, i64, b1 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -307,13 +307,13 @@ ebb0: function %call_i32_f64_f32_i64_b1() { fn0 = %foo() -> i32,f64,f32,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_f32_b1_i64() -> i32, f64, f32, b1, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -324,13 +324,13 @@ ebb0: function %call_i32_f64_f32_b1_i64() { fn0 = %foo() -> i32,f64,f32,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_b1_i64_f32() -> i32, f64, b1, i64, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -341,13 +341,13 @@ ebb0: function %call_i32_f64_b1_i64_f32() { fn0 = %foo() -> i32,f64,b1,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_f64_b1_f32_i64() -> i32, f64, b1, f32, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -358,13 +358,13 @@ ebb0: function %call_i32_f64_b1_f32_i64() { fn0 = %foo() -> i32,f64,b1,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_i64_f32_f64() -> i32, b1, i64, f32, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -375,13 +375,13 @@ ebb0: function %call_i32_b1_i64_f32_f64() { fn0 = %foo() -> i32,b1,i64,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_i64_f64_f32() -> i32, b1, i64, f64, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -392,13 +392,13 @@ ebb0: function %call_i32_b1_i64_f64_f32() { fn0 = %foo() -> i32,b1,i64,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_f32_i64_f64() -> i32, b1, f32, i64, f64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -409,13 +409,13 @@ ebb0: function %call_i32_b1_f32_i64_f64() { fn0 = %foo() -> i32,b1,f32,i64,f64 
-ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_f32_f64_i64() -> i32, b1, f32, f64, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -426,13 +426,13 @@ ebb0: function %call_i32_b1_f32_f64_i64() { fn0 = %foo() -> i32,b1,f32,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_f64_i64_f32() -> i32, b1, f64, i64, f32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -443,13 +443,13 @@ ebb0: function %call_i32_b1_f64_i64_f32() { fn0 = %foo() -> i32,b1,f64,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i32_b1_f64_f32_i64() -> i32, b1, f64, f32, i64 { -ebb0: +block0: v0 = iconst.i32 0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -460,13 +460,13 @@ ebb0: function %call_i32_b1_f64_f32_i64() { fn0 = %foo() -> i32,b1,f64,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_f32_f64_b1() -> i64, i32, f32, f64, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -477,13 +477,13 @@ ebb0: function %call_i64_i32_f32_f64_b1() { fn0 = %foo() -> i64,i32,f32,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_f32_b1_f64() -> i64, i32, f32, b1, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -494,13 +494,13 @@ ebb0: function %call_i64_i32_f32_b1_f64() { fn0 = %foo() -> i64,i32,f32,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_f64_f32_b1() -> i64, i32, f64, f32, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -511,13 +511,13 @@ ebb0: function %call_i64_i32_f64_f32_b1() { fn0 = %foo() -> i64,i32,f64,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_f64_b1_f32() -> i64, i32, f64, b1, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -528,13 +528,13 @@ ebb0: function %call_i64_i32_f64_b1_f32() { fn0 = %foo() -> i64,i32,f64,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_b1_f32_f64() -> i64, i32, b1, f32, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -545,13 +545,13 @@ ebb0: function %call_i64_i32_b1_f32_f64() { fn0 = %foo() -> i64,i32,b1,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_i32_b1_f64_f32() -> i64, i32, b1, f64, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -562,13 +562,13 @@ ebb0: function %call_i64_i32_b1_f64_f32() { fn0 = %foo() -> i64,i32,b1,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f32_i32_f64_b1() -> i64, f32, i32, f64, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -579,13 +579,13 @@ ebb0: function %call_i64_f32_i32_f64_b1() { fn0 = %foo() -> i64,f32,i32,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f32_i32_b1_f64() -> i64, f32, i32, b1, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -596,13 +596,13 @@ ebb0: function %call_i64_f32_i32_b1_f64() { fn0 = %foo() -> i64,f32,i32,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f32_f64_i32_b1() -> i64, f32, f64, i32, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -613,13 +613,13 @@ ebb0: function %call_i64_f32_f64_i32_b1() { fn0 = %foo() -> i64,f32,f64,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call 
fn0() return } function %return_i64_f32_f64_b1_i32() -> i64, f32, f64, b1, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -630,13 +630,13 @@ ebb0: function %call_i64_f32_f64_b1_i32() { fn0 = %foo() -> i64,f32,f64,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f32_b1_i32_f64() -> i64, f32, b1, i32, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -647,13 +647,13 @@ ebb0: function %call_i64_f32_b1_i32_f64() { fn0 = %foo() -> i64,f32,b1,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f32_b1_f64_i32() -> i64, f32, b1, f64, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -664,13 +664,13 @@ ebb0: function %call_i64_f32_b1_f64_i32() { fn0 = %foo() -> i64,f32,b1,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_i32_f32_b1() -> i64, f64, i32, f32, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -681,13 +681,13 @@ ebb0: function %call_i64_f64_i32_f32_b1() { fn0 = %foo() -> i64,f64,i32,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_i32_b1_f32() -> i64, f64, i32, b1, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -698,13 +698,13 @@ ebb0: function %call_i64_f64_i32_b1_f32() { fn0 = %foo() -> i64,f64,i32,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_f32_i32_b1() -> i64, f64, f32, i32, b1 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -715,13 +715,13 @@ ebb0: function %call_i64_f64_f32_i32_b1() { fn0 = %foo() -> i64,f64,f32,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_f32_b1_i32() -> i64, f64, f32, b1, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -732,13 +732,13 @@ ebb0: function %call_i64_f64_f32_b1_i32() { fn0 = %foo() -> i64,f64,f32,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_b1_i32_f32() -> i64, f64, b1, i32, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -749,13 +749,13 @@ ebb0: function %call_i64_f64_b1_i32_f32() { fn0 = %foo() -> i64,f64,b1,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_f64_b1_f32_i32() -> i64, f64, b1, f32, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -766,13 +766,13 @@ ebb0: function %call_i64_f64_b1_f32_i32() { fn0 = %foo() -> i64,f64,b1,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_b1_i32_f32_f64() -> i64, b1, i32, f32, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -783,13 +783,13 @@ ebb0: function %call_i64_b1_i32_f32_f64() { fn0 = %foo() -> i64,b1,i32,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_b1_i32_f64_f32() -> i64, b1, i32, f64, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -800,13 +800,13 @@ ebb0: function %call_i64_b1_i32_f64_f32() { fn0 = %foo() -> i64,b1,i32,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_b1_f32_i32_f64() -> i64, b1, f32, i32, f64 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -817,13 +817,13 @@ ebb0: function %call_i64_b1_f32_i32_f64() { fn0 = %foo() -> i64,b1,f32,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function 
%return_i64_b1_f32_f64_i32() -> i64, b1, f32, f64, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -834,13 +834,13 @@ ebb0: function %call_i64_b1_f32_f64_i32() { fn0 = %foo() -> i64,b1,f32,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_b1_f64_i32_f32() -> i64, b1, f64, i32, f32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -851,13 +851,13 @@ ebb0: function %call_i64_b1_f64_i32_f32() { fn0 = %foo() -> i64,b1,f64,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_i64_b1_f64_f32_i32() -> i64, b1, f64, f32, i32 { -ebb0: +block0: v0 = iconst.i64 0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -868,13 +868,13 @@ ebb0: function %call_i64_b1_f64_f32_i32() { fn0 = %foo() -> i64,b1,f64,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_i64_f64_b1() -> f32, i32, i64, f64, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -885,13 +885,13 @@ ebb0: function %call_f32_i32_i64_f64_b1() { fn0 = %foo() -> f32,i32,i64,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_i64_b1_f64() -> f32, i32, i64, b1, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -902,13 +902,13 @@ ebb0: function %call_f32_i32_i64_b1_f64() { fn0 = %foo() -> f32,i32,i64,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_f64_i64_b1() -> f32, i32, f64, i64, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -919,13 +919,13 @@ ebb0: function %call_f32_i32_f64_i64_b1() { fn0 = %foo() -> f32,i32,f64,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_f64_b1_i64() -> f32, i32, f64, b1, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -936,13 +936,13 @@ ebb0: function %call_f32_i32_f64_b1_i64() { fn0 = %foo() -> f32,i32,f64,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_b1_i64_f64() -> f32, i32, b1, i64, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -953,13 +953,13 @@ ebb0: function %call_f32_i32_b1_i64_f64() { fn0 = %foo() -> f32,i32,b1,i64,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i32_b1_f64_i64() -> f32, i32, b1, f64, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -970,13 +970,13 @@ ebb0: function %call_f32_i32_b1_f64_i64() { fn0 = %foo() -> f32,i32,b1,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_i32_f64_b1() -> f32, i64, i32, f64, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -987,13 +987,13 @@ ebb0: function %call_f32_i64_i32_f64_b1() { fn0 = %foo() -> f32,i64,i32,f64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_i32_b1_f64() -> f32, i64, i32, b1, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -1004,13 +1004,13 @@ ebb0: function %call_f32_i64_i32_b1_f64() { fn0 = %foo() -> f32,i64,i32,b1,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_f64_i32_b1() -> f32, i64, f64, i32, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -1021,13 +1021,13 @@ ebb0: function %call_f32_i64_f64_i32_b1() { fn0 = %foo() -> f32,i64,f64,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_f64_b1_i32() 
-> f32, i64, f64, b1, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -1038,13 +1038,13 @@ ebb0: function %call_f32_i64_f64_b1_i32() { fn0 = %foo() -> f32,i64,f64,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_b1_i32_f64() -> f32, i64, b1, i32, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -1055,13 +1055,13 @@ ebb0: function %call_f32_i64_b1_i32_f64() { fn0 = %foo() -> f32,i64,b1,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_i64_b1_f64_i32() -> f32, i64, b1, f64, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -1072,13 +1072,13 @@ ebb0: function %call_f32_i64_b1_f64_i32() { fn0 = %foo() -> f32,i64,b1,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_i32_i64_b1() -> f32, f64, i32, i64, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -1089,13 +1089,13 @@ ebb0: function %call_f32_f64_i32_i64_b1() { fn0 = %foo() -> f32,f64,i32,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_i32_b1_i64() -> f32, f64, i32, b1, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -1106,13 +1106,13 @@ ebb0: function %call_f32_f64_i32_b1_i64() { fn0 = %foo() -> f32,f64,i32,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_i64_i32_b1() -> f32, f64, i64, i32, b1 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -1123,13 +1123,13 @@ ebb0: function %call_f32_f64_i64_i32_b1() { fn0 = %foo() -> f32,f64,i64,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_i64_b1_i32() -> f32, f64, i64, b1, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -1140,13 +1140,13 @@ ebb0: function %call_f32_f64_i64_b1_i32() { fn0 = %foo() -> f32,f64,i64,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_b1_i32_i64() -> f32, f64, b1, i32, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -1157,13 +1157,13 @@ ebb0: function %call_f32_f64_b1_i32_i64() { fn0 = %foo() -> f32,f64,b1,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_f64_b1_i64_i32() -> f32, f64, b1, i64, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = bconst.b1 true @@ -1174,13 +1174,13 @@ ebb0: function %call_f32_f64_b1_i64_i32() { fn0 = %foo() -> f32,f64,b1,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_b1_i32_i64_f64() -> f32, b1, i32, i64, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -1191,13 +1191,13 @@ ebb0: function %call_f32_b1_i32_i64_f64() { fn0 = %foo() -> f32,b1,i32,i64,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_b1_i32_f64_i64() -> f32, b1, i32, f64, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -1208,13 +1208,13 @@ ebb0: function %call_f32_b1_i32_f64_i64() { fn0 = %foo() -> f32,b1,i32,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_b1_i64_i32_f64() -> f32, b1, i64, i32, f64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -1225,13 +1225,13 @@ ebb0: function %call_f32_b1_i64_i32_f64() { fn0 = %foo() -> f32,b1,i64,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function 
%return_f32_b1_i64_f64_i32() -> f32, b1, i64, f64, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -1242,13 +1242,13 @@ ebb0: function %call_f32_b1_i64_f64_i32() { fn0 = %foo() -> f32,b1,i64,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_b1_f64_i32_i64() -> f32, b1, f64, i32, i64 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -1259,13 +1259,13 @@ ebb0: function %call_f32_b1_f64_i32_i64() { fn0 = %foo() -> f32,b1,f64,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f32_b1_f64_i64_i32() -> f32, b1, f64, i64, i32 { -ebb0: +block0: v0 = f32const 0x0.0 v1 = bconst.b1 true v2 = f64const 0x0.0 @@ -1276,13 +1276,13 @@ ebb0: function %call_f32_b1_f64_i64_i32() { fn0 = %foo() -> f32,b1,f64,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_i64_f32_b1() -> f64, i32, i64, f32, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -1293,13 +1293,13 @@ ebb0: function %call_f64_i32_i64_f32_b1() { fn0 = %foo() -> f64,i32,i64,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_i64_b1_f32() -> f64, i32, i64, b1, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -1310,13 +1310,13 @@ ebb0: function %call_f64_i32_i64_b1_f32() { fn0 = %foo() -> f64,i32,i64,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_f32_i64_b1() -> f64, i32, f32, i64, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -1327,13 +1327,13 @@ ebb0: function %call_f64_i32_f32_i64_b1() { fn0 = %foo() -> f64,i32,f32,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_f32_b1_i64() -> f64, i32, f32, b1, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -1344,13 +1344,13 @@ ebb0: function %call_f64_i32_f32_b1_i64() { fn0 = %foo() -> f64,i32,f32,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_b1_i64_f32() -> f64, i32, b1, i64, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -1361,13 +1361,13 @@ ebb0: function %call_f64_i32_b1_i64_f32() { fn0 = %foo() -> f64,i32,b1,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i32_b1_f32_i64() -> f64, i32, b1, f32, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = bconst.b1 true @@ -1378,13 +1378,13 @@ ebb0: function %call_f64_i32_b1_f32_i64() { fn0 = %foo() -> f64,i32,b1,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i64_i32_f32_b1() -> f64, i64, i32, f32, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -1395,13 +1395,13 @@ ebb0: function %call_f64_i64_i32_f32_b1() { fn0 = %foo() -> f64,i64,i32,f32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i64_i32_b1_f32() -> f64, i64, i32, b1, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -1412,13 +1412,13 @@ ebb0: function %call_f64_i64_i32_b1_f32() { fn0 = %foo() -> f64,i64,i32,b1,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i64_f32_i32_b1() -> f64, i64, f32, i32, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -1429,13 +1429,13 @@ ebb0: function %call_f64_i64_f32_i32_b1() { fn0 = %foo() -> f64,i64,f32,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function 
%return_f64_i64_f32_b1_i32() -> f64, i64, f32, b1, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -1446,13 +1446,13 @@ ebb0: function %call_f64_i64_f32_b1_i32() { fn0 = %foo() -> f64,i64,f32,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i64_b1_i32_f32() -> f64, i64, b1, i32, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -1463,13 +1463,13 @@ ebb0: function %call_f64_i64_b1_i32_f32() { fn0 = %foo() -> f64,i64,b1,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_i64_b1_f32_i32() -> f64, i64, b1, f32, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = bconst.b1 true @@ -1480,13 +1480,13 @@ ebb0: function %call_f64_i64_b1_f32_i32() { fn0 = %foo() -> f64,i64,b1,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_i32_i64_b1() -> f64, f32, i32, i64, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -1497,13 +1497,13 @@ ebb0: function %call_f64_f32_i32_i64_b1() { fn0 = %foo() -> f64,f32,i32,i64,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_i32_b1_i64() -> f64, f32, i32, b1, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -1514,13 +1514,13 @@ ebb0: function %call_f64_f32_i32_b1_i64() { fn0 = %foo() -> f64,f32,i32,b1,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_i64_i32_b1() -> f64, f32, i64, i32, b1 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -1531,13 +1531,13 @@ ebb0: function %call_f64_f32_i64_i32_b1() { fn0 = %foo() -> f64,f32,i64,i32,b1 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_i64_b1_i32() -> f64, f32, i64, b1, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -1548,13 +1548,13 @@ ebb0: function %call_f64_f32_i64_b1_i32() { fn0 = %foo() -> f64,f32,i64,b1,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_b1_i32_i64() -> f64, f32, b1, i32, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -1565,13 +1565,13 @@ ebb0: function %call_f64_f32_b1_i32_i64() { fn0 = %foo() -> f64,f32,b1,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_f32_b1_i64_i32() -> f64, f32, b1, i64, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = bconst.b1 true @@ -1582,13 +1582,13 @@ ebb0: function %call_f64_f32_b1_i64_i32() { fn0 = %foo() -> f64,f32,b1,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_b1_i32_i64_f32() -> f64, b1, i32, i64, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -1599,13 +1599,13 @@ ebb0: function %call_f64_b1_i32_i64_f32() { fn0 = %foo() -> f64,b1,i32,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_b1_i32_f32_i64() -> f64, b1, i32, f32, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = iconst.i32 0 @@ -1616,13 +1616,13 @@ ebb0: function %call_f64_b1_i32_f32_i64() { fn0 = %foo() -> f64,b1,i32,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_b1_i64_i32_f32() -> f64, b1, i64, i32, f32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -1633,13 +1633,13 @@ ebb0: function %call_f64_b1_i64_i32_f32() { fn0 = %foo() -> f64,b1,i64,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } 
function %return_f64_b1_i64_f32_i32() -> f64, b1, i64, f32, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = iconst.i64 0 @@ -1650,13 +1650,13 @@ ebb0: function %call_f64_b1_i64_f32_i32() { fn0 = %foo() -> f64,b1,i64,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_b1_f32_i32_i64() -> f64, b1, f32, i32, i64 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -1667,13 +1667,13 @@ ebb0: function %call_f64_b1_f32_i32_i64() { fn0 = %foo() -> f64,b1,f32,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_f64_b1_f32_i64_i32() -> f64, b1, f32, i64, i32 { -ebb0: +block0: v0 = f64const 0x0.0 v1 = bconst.b1 true v2 = f32const 0x0.0 @@ -1684,13 +1684,13 @@ ebb0: function %call_f64_b1_f32_i64_i32() { fn0 = %foo() -> f64,b1,f32,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_i64_f32_f64() -> b1, i32, i64, f32, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -1701,13 +1701,13 @@ ebb0: function %call_b1_i32_i64_f32_f64() { fn0 = %foo() -> b1,i32,i64,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_i64_f64_f32() -> b1, i32, i64, f64, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = iconst.i64 0 @@ -1718,13 +1718,13 @@ ebb0: function %call_b1_i32_i64_f64_f32() { fn0 = %foo() -> b1,i32,i64,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_f32_i64_f64() -> b1, i32, f32, i64, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -1735,13 +1735,13 @@ ebb0: function %call_b1_i32_f32_i64_f64() { fn0 = %foo() -> b1,i32,f32,i64,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_f32_f64_i64() -> b1, i32, f32, f64, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = f32const 0x0.0 @@ -1752,13 +1752,13 @@ ebb0: function %call_b1_i32_f32_f64_i64() { fn0 = %foo() -> b1,i32,f32,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_f64_i64_f32() -> b1, i32, f64, i64, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -1769,13 +1769,13 @@ ebb0: function %call_b1_i32_f64_i64_f32() { fn0 = %foo() -> b1,i32,f64,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i32_f64_f32_i64() -> b1, i32, f64, f32, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i32 0 v2 = f64const 0x0.0 @@ -1786,13 +1786,13 @@ ebb0: function %call_b1_i32_f64_f32_i64() { fn0 = %foo() -> b1,i32,f64,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i64_i32_f32_f64() -> b1, i64, i32, f32, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -1803,13 +1803,13 @@ ebb0: function %call_b1_i64_i32_f32_f64() { fn0 = %foo() -> b1,i64,i32,f32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i64_i32_f64_f32() -> b1, i64, i32, f64, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = iconst.i32 0 @@ -1820,13 +1820,13 @@ ebb0: function %call_b1_i64_i32_f64_f32() { fn0 = %foo() -> b1,i64,i32,f64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i64_f32_i32_f64() -> b1, i64, f32, i32, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -1837,13 +1837,13 @@ ebb0: function %call_b1_i64_f32_i32_f64() { fn0 = %foo() -> b1,i64,f32,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } 
function %return_b1_i64_f32_f64_i32() -> b1, i64, f32, f64, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = f32const 0x0.0 @@ -1854,13 +1854,13 @@ ebb0: function %call_b1_i64_f32_f64_i32() { fn0 = %foo() -> b1,i64,f32,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i64_f64_i32_f32() -> b1, i64, f64, i32, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -1871,13 +1871,13 @@ ebb0: function %call_b1_i64_f64_i32_f32() { fn0 = %foo() -> b1,i64,f64,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_i64_f64_f32_i32() -> b1, i64, f64, f32, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = iconst.i64 0 v2 = f64const 0x0.0 @@ -1888,13 +1888,13 @@ ebb0: function %call_b1_i64_f64_f32_i32() { fn0 = %foo() -> b1,i64,f64,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_i32_i64_f64() -> b1, f32, i32, i64, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -1905,13 +1905,13 @@ ebb0: function %call_b1_f32_i32_i64_f64() { fn0 = %foo() -> b1,f32,i32,i64,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_i32_f64_i64() -> b1, f32, i32, f64, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = iconst.i32 0 @@ -1922,13 +1922,13 @@ ebb0: function %call_b1_f32_i32_f64_i64() { fn0 = %foo() -> b1,f32,i32,f64,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_i64_i32_f64() -> b1, f32, i64, i32, f64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -1939,13 +1939,13 @@ ebb0: function %call_b1_f32_i64_i32_f64() { fn0 = %foo() -> b1,f32,i64,i32,f64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_i64_f64_i32() -> b1, f32, i64, f64, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = iconst.i64 0 @@ -1956,13 +1956,13 @@ ebb0: function %call_b1_f32_i64_f64_i32() { fn0 = %foo() -> b1,f32,i64,f64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_f64_i32_i64() -> b1, f32, f64, i32, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -1973,13 +1973,13 @@ ebb0: function %call_b1_f32_f64_i32_i64() { fn0 = %foo() -> b1,f32,f64,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f32_f64_i64_i32() -> b1, f32, f64, i64, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f32const 0x0.0 v2 = f64const 0x0.0 @@ -1990,13 +1990,13 @@ ebb0: function %call_b1_f32_f64_i64_i32() { fn0 = %foo() -> b1,f32,f64,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f64_i32_i64_f32() -> b1, f64, i32, i64, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -2007,13 +2007,13 @@ ebb0: function %call_b1_f64_i32_i64_f32() { fn0 = %foo() -> b1,f64,i32,i64,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f64_i32_f32_i64() -> b1, f64, i32, f32, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = iconst.i32 0 @@ -2024,13 +2024,13 @@ ebb0: function %call_b1_f64_i32_f32_i64() { fn0 = %foo() -> b1,f64,i32,f32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f64_i64_i32_f32() -> b1, f64, i64, i32, f32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -2041,13 +2041,13 @@ ebb0: function %call_b1_f64_i64_i32_f32() { fn0 = %foo() -> b1,f64,i64,i32,f32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return 
} function %return_b1_f64_i64_f32_i32() -> b1, f64, i64, f32, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = iconst.i64 0 @@ -2058,13 +2058,13 @@ ebb0: function %call_b1_f64_i64_f32_i32() { fn0 = %foo() -> b1,f64,i64,f32,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f64_f32_i32_i64() -> b1, f64, f32, i32, i64 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -2075,13 +2075,13 @@ ebb0: function %call_b1_f64_f32_i32_i64() { fn0 = %foo() -> b1,f64,f32,i32,i64 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } function %return_b1_f64_f32_i64_i32() -> b1, f64, f32, i64, i32 { -ebb0: +block0: v0 = bconst.b1 true v1 = f64const 0x0.0 v2 = f32const 0x0.0 @@ -2092,7 +2092,7 @@ ebb0: function %call_b1_f64_f32_i64_i32() { fn0 = %foo() -> b1,f64,f32,i64,i32 -ebb0: +block0: v0,v1,v2,v3,v4 = call fn0() return } diff --git a/cranelift/filetests/filetests/wasm/multi-val-reuse-ret-ptr-stack-slot.clif b/cranelift/filetests/filetests/wasm/multi-val-reuse-ret-ptr-stack-slot.clif index f7d0bf846c..d712bf21ce 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-reuse-ret-ptr-stack-slot.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-reuse-ret-ptr-stack-slot.clif @@ -16,7 +16,7 @@ function %foo() -> i32, f32 { ; nextln: fn0 = %f sig0 ; nextln: fn1 = %g sig1 -ebb0: +block0: v0, v1, v2, v3, v4 = call fn0() ; check: v18 = stack_addr.i64 ss0 ; nextln: v25 = func_addr.i64 fn0 diff --git a/cranelift/filetests/filetests/wasm/multi-val-sret-slot-alignment.clif b/cranelift/filetests/filetests/wasm/multi-val-sret-slot-alignment.clif index b6c74e314e..5004ebbe54 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-sret-slot-alignment.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-sret-slot-alignment.clif @@ -7,8 +7,8 @@ target x86_64 haswell function %returner() -> i8, i32, i8, i64 { ; check: function %returner(i64 sret [%rdi]) -> i64 sret [%rax] fast { -ebb0: -; check: ebb0(v4: i64): +block0: +; check: block0(v4: i64): v0 = iconst.i8 0 v1 = iconst.i32 1 @@ -31,7 +31,7 @@ function %caller() { ; check: sig0 = (i64 sret [%rdi]) -> i64 sret [%rax] fast ; nextln: fn0 = %returner sig0 -ebb0: +block0: v0, v1, v2, v3 = call fn0() ; check: v4 = stack_addr.i64 ss0 ; nextln: v10 = func_addr.i64 fn0 diff --git a/cranelift/filetests/filetests/wasm/multi-val-take-many-and-return-many.clif b/cranelift/filetests/filetests/wasm/multi-val-take-many-and-return-many.clif index 385cc9d27c..17f2f306d4 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-take-many-and-return-many.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-take-many-and-return-many.clif @@ -2,13 +2,13 @@ test compile target x86_64 haswell function %returner(i32, i64, f32, f64) -> i32, i64, f32, f64 { -ebb0(v0: i32, v1: i64, v2: f32, v3: f64): +block0(v0: i32, v1: i64, v2: f32, v3: f64): return v0, v1, v2, v3 } function %caller() { fn0 = %returner(i32, i64, f32, f64) -> i32, i64, f32, f64 -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i64 1 v2 = f32const 0x2.0 diff --git a/cranelift/filetests/filetests/wasm/multi-val-tons-of-results.clif b/cranelift/filetests/filetests/wasm/multi-val-tons-of-results.clif index f19b1bcbf0..f394bdd904 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-tons-of-results.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-tons-of-results.clif @@ -2,7 +2,7 @@ test compile target x86_64 haswell function %return_20_i32s() -> i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, 
i32 { -ebb0: +block0: v0 = iconst.i32 0 v1 = iconst.i32 1 v2 = iconst.i32 2 @@ -28,7 +28,7 @@ ebb0: function %call_20_i32s() { fn0 = %return_20_i32s() -> i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 -ebb0: +block0: v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19 = call fn0() return } diff --git a/cranelift/filetests/filetests/wasm/r32.clif b/cranelift/filetests/filetests/wasm/r32.clif index a60dd4d29b..7e1622246a 100644 --- a/cranelift/filetests/filetests/wasm/r32.clif +++ b/cranelift/filetests/filetests/wasm/r32.clif @@ -7,11 +7,11 @@ set enable_safepoints=true target i686 haswell function %select_ref(i32, r32, r32) -> r32 { -ebb0(v0: i32, v1: r32, v2: r32): - brz v0, ebb1(v2) - jump ebb1(v1) +block0(v0: i32, v1: r32, v2: r32): + brz v0, block1(v2) + jump block1(v1) -ebb1(v3: r32): +block1(v3: r32): return v3 } @@ -21,7 +21,7 @@ function %table_set(i32, r32, i32 vmctx) { gv2 = load.i32 notrap aligned gv0 +4 table0 = dynamic gv1, element_size 1, bound gv2, index_type i32 -ebb0(v0: i32, v1: r32, v2: i32): +block0(v0: i32, v1: r32, v2: i32): v3 = table_addr.i32 table0, v0, +0; store.r32 notrap aligned v1, v3 return @@ -33,7 +33,7 @@ function %table_get(i32, i32 vmctx) -> r32 { gv2 = load.i32 notrap aligned gv0 +4 table0 = dynamic gv1, element_size 1, bound gv2, index_type i32 -ebb0(v0: i32, v1: i32): +block0(v0: i32, v1: i32): v2 = table_addr.i32 table0, v0, +0; v3 = load.r32 notrap aligned v2 return v3 @@ -44,7 +44,7 @@ function %test_refs(r32, r32, r32, i32 vmctx) { fn1 = %table_set(i32, r32, i32 vmctx) fn2 = %table_get(i32, i32 vmctx) -> r32 -ebb0(v0: r32, v1: r32, v2: r32, v3: i32): +block0(v0: r32, v1: r32, v2: r32, v3: i32): v4 = iconst.i32 0 v5 = iconst.i32 1 v8 = iconst.i32 2 diff --git a/cranelift/filetests/filetests/wasm/r64.clif b/cranelift/filetests/filetests/wasm/r64.clif index 5637028103..9fab27fbb5 100644 --- a/cranelift/filetests/filetests/wasm/r64.clif +++ b/cranelift/filetests/filetests/wasm/r64.clif @@ -7,11 +7,11 @@ set enable_safepoints=true target x86_64 haswell function %select_ref(i32, r64, r64) -> r64 { -ebb0(v0: i32, v1: r64, v2: r64): - brz v0, ebb1(v2) - jump ebb1(v1) +block0(v0: i32, v1: r64, v2: r64): + brz v0, block1(v2) + jump block1(v1) -ebb1(v3: r64): +block1(v3: r64): return v3 } @@ -21,7 +21,7 @@ function %table_set(i32, r64, i64 vmctx) { gv2 = load.i32 notrap aligned gv0 +8 table0 = dynamic gv1, element_size 1, bound gv2, index_type i32 -ebb0(v0: i32, v1: r64, v2: i64): +block0(v0: i32, v1: r64, v2: i64): v3 = table_addr.i64 table0, v0, +0; store.r64 notrap aligned v1, v3 return @@ -33,7 +33,7 @@ function %table_get(i32, i64 vmctx) -> r64 { gv2 = load.i32 notrap aligned gv0 +8 table0 = dynamic gv1, element_size 1, bound gv2, index_type i32 -ebb0(v0: i32, v1: i64): +block0(v0: i32, v1: i64): v2 = table_addr.i64 table0, v0, +0; v3 = load.r64 notrap aligned v2 return v3 @@ -44,7 +44,7 @@ function %test_refs(r64, r64, r64, i64 vmctx) { fn1 = %table_set(i32, r64, i64 vmctx) fn2 = %table_get(i32, i64 vmctx) -> r64 -ebb0(v0: r64, v1: r64, v2: r64, v3: i64): +block0(v0: r64, v1: r64, v2: r64, v3: i64): v4 = iconst.i32 0 v5 = iconst.i32 1 v8 = iconst.i32 2 diff --git a/cranelift/filetests/filetests/wasm/select.clif b/cranelift/filetests/filetests/wasm/select.clif index f5f55cda24..b2508ef6e5 100644 --- a/cranelift/filetests/filetests/wasm/select.clif +++ b/cranelift/filetests/filetests/wasm/select.clif @@ -6,25 +6,25 @@ target i686 haswell target x86_64 haswell function 
%select_i32(i32, i32, i32) -> i32 { -ebb0(v0: i32, v1: i32, v2: i32): +block0(v0: i32, v1: i32, v2: i32): v3 = select v2, v0, v1 return v3 } function %select_i64(i64, i64, i32) -> i64 { -ebb0(v0: i64, v1: i64, v2: i32): +block0(v0: i64, v1: i64, v2: i32): v3 = select v2, v0, v1 return v3 } function %select_f32(f32, f32, i32) -> f32 { -ebb0(v0: f32, v1: f32, v2: i32): +block0(v0: f32, v1: f32, v2: i32): v3 = select v2, v0, v1 return v3 } function %select_f64(f64, f64, i32) -> f64 { -ebb0(v0: f64, v1: f64, v2: i32): +block0(v0: f64, v1: f64, v2: i32): v3 = select v2, v0, v1 return v3 } diff --git a/cranelift/filetests/src/function_runner.rs b/cranelift/filetests/src/function_runner.rs index 32c2bf5f50..302acf0cb4 100644 --- a/cranelift/filetests/src/function_runner.rs +++ b/cranelift/filetests/src/function_runner.rs @@ -98,7 +98,7 @@ mod test { " test run function %test() -> b8 { - ebb0: + block0: nop v1 = bconst.b8 true return v1 diff --git a/cranelift/filetests/src/match_directive.rs b/cranelift/filetests/src/match_directive.rs index a3f951f3b4..bb379f25c9 100644 --- a/cranelift/filetests/src/match_directive.rs +++ b/cranelift/filetests/src/match_directive.rs @@ -1,7 +1,7 @@ /// Look for a directive in a comment string. /// The directive is of the form "foo:" and should follow the leading `;` in the comment: /// -/// ; dominates: ebb3 ebb4 +/// ; dominates: block3 block4 /// /// Return the comment text following the directive. pub fn match_directive<'a>(comment: &'a str, directive: &str) -> Option<&'a str> { diff --git a/cranelift/filetests/src/test_binemit.rs b/cranelift/filetests/src/test_binemit.rs index 1a6bd60e99..d2cfdd97ca 100644 --- a/cranelift/filetests/src/test_binemit.rs +++ b/cranelift/filetests/src/test_binemit.rs @@ -73,8 +73,8 @@ impl binemit::CodeSink for TextSink { self.offset += 8; } - fn reloc_ebb(&mut self, reloc: binemit::Reloc, ebb_offset: binemit::CodeOffset) { - write!(self.text, "{}({}) ", reloc, ebb_offset).unwrap(); + fn reloc_block(&mut self, reloc: binemit::Reloc, block_offset: binemit::CodeOffset) { + write!(self.text, "{}({}) ", reloc, block_offset).unwrap(); } fn reloc_external( @@ -151,9 +151,9 @@ impl SubTest for TestBinEmit { // Give an encoding to any instruction that doesn't already have one. let mut divert = RegDiversions::new(); - for ebb in func.layout.ebbs() { + for block in func.layout.blocks() { divert.clear(); - for inst in func.layout.ebb_insts(ebb) { + for inst in func.layout.block_insts(block) { if !func.encodings[inst].is_legal() { // Find an encoding that satisfies both immediate field and register // constraints. @@ -181,7 +181,7 @@ impl SubTest for TestBinEmit { } } - // Relax branches and compute EBB offsets based on the encodings. + // Relax branches and compute block offsets based on the encodings. let mut cfg = ControlFlowGraph::with_function(&func); let mut domtree = DominatorTree::with_function(&func, &cfg); let CodeInfo { total_size, .. } = @@ -218,15 +218,15 @@ impl SubTest for TestBinEmit { // Now emit all instructions. let mut sink = TextSink::new(); - for ebb in func.layout.ebbs() { + for block in func.layout.blocks() { divert.clear(); // Correct header offsets should have been computed by `relax_branches()`. 
assert_eq!( - sink.offset, func.offsets[ebb], + sink.offset, func.offsets[block], "Inconsistent {} header offset", - ebb + block ); - for (offset, inst, enc_bytes) in func.inst_offsets(ebb, &encinfo) { + for (offset, inst, enc_bytes) in func.inst_offsets(block, &encinfo) { assert_eq!(sink.offset, offset); sink.text.clear(); let enc = func.encodings[inst]; @@ -293,8 +293,8 @@ impl SubTest for TestBinEmit { for (jt, jt_data) in func.jump_tables.iter() { let jt_offset = func.jt_offsets[jt]; - for ebb in jt_data.iter() { - let rel_offset: i32 = func.offsets[*ebb] as i32 - jt_offset as i32; + for block in jt_data.iter() { + let rel_offset: i32 = func.offsets[*block] as i32 - jt_offset as i32; sink.put4(rel_offset as u32) } } diff --git a/cranelift/filetests/src/test_compile.rs b/cranelift/filetests/src/test_compile.rs index 10e07440ed..0ac8c48c5e 100644 --- a/cranelift/filetests/src/test_compile.rs +++ b/cranelift/filetests/src/test_compile.rs @@ -98,7 +98,7 @@ impl binemit::CodeSink for SizeSink { self.offset += 8; } - fn reloc_ebb(&mut self, _reloc: binemit::Reloc, _ebb_offset: binemit::CodeOffset) {} + fn reloc_block(&mut self, _reloc: binemit::Reloc, _block_offset: binemit::CodeOffset) {} fn reloc_external( &mut self, _reloc: binemit::Reloc, diff --git a/cranelift/filetests/src/test_domtree.rs b/cranelift/filetests/src/test_domtree.rs index 05c7dc31e5..f5f81ed03a 100644 --- a/cranelift/filetests/src/test_domtree.rs +++ b/cranelift/filetests/src/test_domtree.rs @@ -3,11 +3,11 @@ //! The `test domtree` test command looks for annotations on instructions like this: //! //! ```clif -//! jump ebb3 ; dominates: ebb3 +//! jump block3 ; dominates: block3 //! ``` //! //! This annotation means that the jump instruction is expected to be the immediate dominator of -//! `ebb3`. +//! `block3`. //! //! We verify that the dominator tree annotations are complete and correct. //! @@ -58,31 +58,31 @@ impl SubTest for TestDomtree { )); } }; - for src_ebb in tail.split_whitespace() { - let ebb = match context.details.map.lookup_str(src_ebb) { - Some(AnyEntity::Ebb(ebb)) => ebb, - _ => return Err(format!("expected defined EBB, got {}", src_ebb)), + for src_block in tail.split_whitespace() { + let block = match context.details.map.lookup_str(src_block) { + Some(AnyEntity::Block(block)) => block, + _ => return Err(format!("expected defined block, got {}", src_block)), }; - // Annotations say that `inst` is the idom of `ebb`. - if expected.insert(ebb, inst).is_some() { - return Err(format!("multiple dominators for {}", src_ebb)); + // Annotations say that `inst` is the idom of `block`. + if expected.insert(block, inst).is_some() { + return Err(format!("multiple dominators for {}", src_block)); } // Compare to computed domtree. - match domtree.idom(ebb) { + match domtree.idom(block) { Some(got_inst) if got_inst != inst => { return Err(format!( "mismatching idoms for {}:\n\ want: {}, got: {}", - src_ebb, inst, got_inst + src_block, inst, got_inst )); } None => { return Err(format!( "mismatching idoms for {}:\n\ want: {}, got: unreachable", - src_ebb, inst + src_block, inst )); } _ => {} @@ -92,18 +92,18 @@ impl SubTest for TestDomtree { } // Now we know that everything in `expected` is consistent with `domtree`. - // All other EBB's should be either unreachable or the entry block. - for ebb in func + // All other block's should be either unreachable or the entry block. 
- for ebb in func + // All other blocks should be either unreachable or the entry block. + for block in func .layout - .ebbs() + .blocks() .skip(1) - .filter(|ebb| !expected.contains_key(ebb)) + .filter(|block| !expected.contains_key(block)) { - if let Some(got_inst) = domtree.idom(ebb) { + if let Some(got_inst) = domtree.idom(block) { return Err(format!( "mismatching idoms for renumbered {}:\n\ want: unreachable, got: {}", - ebb, got_inst + block, got_inst )); } } @@ -118,8 +118,8 @@ fn filecheck_text(func: &Function, domtree: &DominatorTree) -> Result<String, String> { diff --git a/cranelift/frontend/src/frontend.rs b/cranelift/frontend/src/frontend.rs pub struct FunctionBuilderContext { ssa: SSABuilder, - ebbs: SecondaryMap<Ebb, EbbData>, + blocks: SecondaryMap<Block, BlockData>, types: SecondaryMap<Variable, Type>, } @@ -40,12 +40,12 @@ pub struct FunctionBuilder<'a> { } #[derive(Clone, Default)] -struct EbbData { - /// An Ebb is "pristine" iff no instructions have been added since the last +struct BlockData { + /// A Block is "pristine" iff no instructions have been added since the last /// call to `switch_to_block()`. pristine: bool, - /// An Ebb is "filled" iff a terminator instruction has been inserted since + /// A Block is "filled" iff a terminator instruction has been inserted since /// the last call to `switch_to_block()`. /// /// A filled block cannot be pristine. @@ -57,20 +57,20 @@ struct Position { - ebb: PackedOption<Ebb>, - basic_block: PackedOption<Block>, + block: PackedOption<Block>, + basic_block: PackedOption<SSABlock>, } impl Position { - fn at(ebb: Ebb, basic_block: Block) -> Self { + fn at(block: Block, basic_block: SSABlock) -> Self { Self { - ebb: PackedOption::from(ebb), + block: PackedOption::from(block), basic_block: PackedOption::from(basic_block), } } fn is_default(&self) -> bool { - self.ebb.is_none() && self.basic_block.is_none() + self.block.is_none() && self.basic_block.is_none() } } @@ -80,19 +80,19 @@ impl FunctionBuilderContext { pub fn new() -> Self { Self { ssa: SSABuilder::new(), - ebbs: SecondaryMap::new(), + blocks: SecondaryMap::new(), types: SecondaryMap::new(), } } fn clear(&mut self) { self.ssa.clear(); - self.ebbs.clear(); + self.blocks.clear(); self.types.clear(); } fn is_empty(&self) -> bool { - self.ssa.is_empty() && self.ebbs.is_empty() && self.types.is_empty() + self.ssa.is_empty() && self.blocks.is_empty() && self.types.is_empty() } } @@ -100,12 +100,12 @@ impl FunctionBuilderContext { /// one convenience method per Cranelift IR instruction. pub struct FuncInstBuilder<'short, 'long: 'short> { builder: &'short mut FunctionBuilder<'long>, - ebb: Ebb, + block: Block, } impl<'short, 'long> FuncInstBuilder<'short, 'long> { - fn new(builder: &'short mut FunctionBuilder<'long>, ebb: Ebb) -> Self { - Self { builder, ebb } + fn new(builder: &'short mut FunctionBuilder<'long>, block: Block) -> Self { + Self { builder, block } } } @@ -122,22 +122,22 @@ impl<'short, 'long> InstBuilderBase<'short> for FuncInstBuilder<'short, 'long> { // instruction being inserted to add related info to the DFG and the SSA building system, // and perform debug sanity checks.
fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'short mut DataFlowGraph) { - // We only insert the Ebb in the layout when an instruction is added to it - self.builder.ensure_inserted_ebb(); + // We only insert the Block in the layout when an instruction is added to it + self.builder.ensure_inserted_block(); let inst = self.builder.func.dfg.make_inst(data.clone()); self.builder.func.dfg.make_inst_results(inst, ctrl_typevar); - self.builder.func.layout.append_inst(inst, self.ebb); + self.builder.func.layout.append_inst(inst, self.block); if !self.builder.srcloc.is_default() { self.builder.func.srclocs[inst] = self.builder.srcloc; } if data.opcode().is_branch() { match data.branch_destination() { - Some(dest_ebb) => { + Some(dest_block) => { // If the user has supplied jump arguments we must adapt the arguments of - // the destination ebb - self.builder.declare_successor(dest_ebb, inst); + // the destination block + self.builder.declare_successor(dest_block, inst); } None => { // branch_destination() doesn't detect jump_tables @@ -149,23 +149,23 @@ impl<'short, 'long> InstBuilderBase<'short> for FuncInstBuilder<'short, 'long> { // Unlike all other jumps/branches, jump tables are // capable of having the same successor appear // multiple times, so we must deduplicate. - let mut unique = EntitySet::<Ebb>::new(); - for dest_ebb in self + let mut unique = EntitySet::<Block>::new(); + for dest_block in self .builder .func .jump_tables .get(table) .expect("you are referencing an undeclared jump table") .iter() - .filter(|&dest_ebb| unique.insert(*dest_ebb)) + .filter(|&dest_block| unique.insert(*dest_block)) { - self.builder.func_ctx.ssa.declare_ebb_predecessor( - *dest_ebb, + self.builder.func_ctx.ssa.declare_block_predecessor( + *dest_block, self.builder.position.basic_block.unwrap(), inst, ); } - self.builder.func_ctx.ssa.declare_ebb_predecessor( + self.builder.func_ctx.ssa.declare_block_predecessor( destination, self.builder.position.basic_block.unwrap(), inst, @@ -189,7 +189,7 @@ impl<'short, 'long> InstBuilderBase<'short> for FuncInstBuilder<'short, 'long> { /// The module is parametrized by one type which is the representation of variables in your /// origin language. It offers a way to conveniently append instructions to your program flow. /// You are responsible for splitting your instruction flow into extended blocks (declared with -/// `create_ebb`) whose properties are: +/// `create_block`) whose properties are: /// /// - branch and jump instructions can only point at the top of extended blocks; /// - the last instruction of each block is a terminator instruction which has no natural successor, @@ -214,7 +214,7 @@ impl<'short, 'long> InstBuilderBase<'short> for FuncInstBuilder<'short, 'long> { /// /// The functions below will panic in debug mode whenever you try to modify the Cranelift IR /// function in a way that violates the coherence of the code. For instance: switching to a new -/// `Ebb` when you haven't filled the current one with a terminator instruction, inserting a +/// `Block` when you haven't filled the current one with a terminator instruction, inserting a /// return instruction with arguments that don't match the function's signature. impl<'a> FunctionBuilder<'a> { /// Creates a new FunctionBuilder structure that will operate on a `Function` using a @@ -234,26 +234,26 @@ impl<'a> FunctionBuilder<'a> { self.srcloc = srcloc; } - /// Creates a new `Ebb` and returns its reference.
- pub fn create_ebb(&mut self) -> Ebb { - let ebb = self.func.dfg.make_ebb(); - self.func_ctx.ssa.declare_ebb_header_block(ebb); - self.func_ctx.ebbs[ebb] = EbbData { + /// Creates a new `Block` and returns its reference. + pub fn create_block(&mut self) -> Block { + let block = self.func.dfg.make_block(); + self.func_ctx.ssa.declare_block_header_block(block); + self.func_ctx.blocks[block] = BlockData { filled: false, pristine: true, user_param_count: 0, }; - ebb + block } /// After the call to this function, new instructions will be inserted into the designated - /// block, in the order they are declared. You must declare the types of the Ebb arguments + /// block, in the order they are declared. You must declare the types of the Block arguments /// you will use here. /// /// When inserting the terminator instruction (which doesn't have a fallthrough to its immediate /// successor), the block will be declared filled and it will not be possible to append /// instructions to it. - pub fn switch_to_block(&mut self, ebb: Ebb) { + pub fn switch_to_block(&mut self, block: Block) { // First we check that the previous block has been filled. debug_assert!( self.position.is_default() @@ -264,33 +264,33 @@ impl<'a> FunctionBuilder<'a> { ); // We cannot switch to a filled block debug_assert!( - !self.func_ctx.ebbs[ebb].filled, + !self.func_ctx.blocks[block].filled, "you cannot switch to a block which is already filled" ); - let basic_block = self.func_ctx.ssa.header_block(ebb); + let basic_block = self.func_ctx.ssa.header_block(block); // Then we change the cursor position. - self.position = Position::at(ebb, basic_block); + self.position = Position::at(block, basic_block); } /// Declares that all the predecessors of this block are known. /// - /// Function to call with `ebb` as soon as the last branch instruction to `ebb` has been + /// Call this method with `block` as soon as the last branch instruction to `block` has been /// created. Forgetting to call this method on every block will cause inconsistencies in the /// produced functions. - pub fn seal_block(&mut self, ebb: Ebb) { - let side_effects = self.func_ctx.ssa.seal_ebb_header_block(ebb, self.func); + pub fn seal_block(&mut self, block: Block) { + let side_effects = self.func_ctx.ssa.seal_block_header_block(block, self.func); self.handle_ssa_side_effects(side_effects); } /// Effectively calls seal_block on all blocks in the function. /// - /// It's more efficient to seal `Ebb`s as soon as possible, during + /// It's more efficient to seal `Block`s as soon as possible, during /// translation, but for frontends where this is impractical to do, this /// function can be used at the end of translating all blocks to ensure /// that everything is sealed. pub fn seal_all_blocks(&mut self) { - let side_effects = self.func_ctx.ssa.seal_all_ebb_header_blocks(self.func); + let side_effects = self.func_ctx.ssa.seal_all_block_header_blocks(self.func); self.handle_ssa_side_effects(side_effects); } @@ -392,26 +392,26 @@ impl<'a> FunctionBuilder<'a> { } /// Returns an object with the [`InstBuilder`](cranelift_codegen::ir::InstBuilder) - /// trait that allows to conveniently append an instruction to the current `Ebb` being built. + /// trait that allows you to conveniently append an instruction to the current `Block` being built.
pub fn ins<'short>(&'short mut self) -> FuncInstBuilder<'short, 'a> { - let ebb = self + let block = self .position - .ebb + .block .expect("Please call switch_to_block before inserting instructions"); - FuncInstBuilder::new(self, ebb) + FuncInstBuilder::new(self, block) } - /// Make sure that the current EBB is inserted in the layout. - pub fn ensure_inserted_ebb(&mut self) { - let ebb = self.position.ebb.unwrap(); - if self.func_ctx.ebbs[ebb].pristine { - if !self.func.layout.is_ebb_inserted(ebb) { - self.func.layout.append_ebb(ebb); + /// Make sure that the current block is inserted in the layout. + pub fn ensure_inserted_block(&mut self) { + let block = self.position.block.unwrap(); + if self.func_ctx.blocks[block].pristine { + if !self.func.layout.is_block_inserted(block) { + self.func.layout.append_block(block); } - self.func_ctx.ebbs[ebb].pristine = false; + self.func_ctx.blocks[block].pristine = false; } else { debug_assert!( - !self.func_ctx.ebbs[ebb].filled, + !self.func_ctx.blocks[block].filled, "you cannot add an instruction to a block already filled" ); } @@ -422,40 +422,40 @@ impl<'a> FunctionBuilder<'a> { /// This can be used to insert SSA code that doesn't need to access locals and that doesn't /// need to know about `FunctionBuilder` at all. pub fn cursor(&mut self) -> FuncCursor { - self.ensure_inserted_ebb(); + self.ensure_inserted_block(); FuncCursor::new(self.func) .with_srcloc(self.srcloc) - .at_bottom(self.position.ebb.unwrap()) + .at_bottom(self.position.block.unwrap()) } - /// Append parameters to the given `Ebb` corresponding to the function - /// parameters. This can be used to set up the ebb parameters for the + /// Append parameters to the given `Block` corresponding to the function + /// parameters. This can be used to set up the block parameters for the /// entry block. - pub fn append_ebb_params_for_function_params(&mut self, ebb: Ebb) { + pub fn append_block_params_for_function_params(&mut self, block: Block) { debug_assert!( - !self.func_ctx.ssa.has_any_predecessors(ebb), - "ebb parameters for function parameters should only be added to the entry block" + !self.func_ctx.ssa.has_any_predecessors(block), + "block parameters for function parameters should only be added to the entry block" ); // These parameters count as "user" parameters here because they aren't // inserted by the SSABuilder. - let user_param_count = &mut self.func_ctx.ebbs[ebb].user_param_count; + let user_param_count = &mut self.func_ctx.blocks[block].user_param_count; for argtyp in &self.func.signature.params { *user_param_count += 1; - self.func.dfg.append_ebb_param(ebb, argtyp.value_type); + self.func.dfg.append_block_param(block, argtyp.value_type); } } - /// Append parameters to the given `Ebb` corresponding to the function - /// return values. This can be used to set up the ebb parameters for a + /// Append parameters to the given `Block` corresponding to the function + /// return values. This can be used to set up the block parameters for a /// function exit block. - pub fn append_ebb_params_for_function_returns(&mut self, ebb: Ebb) { + pub fn append_block_params_for_function_returns(&mut self, block: Block) { // These parameters count as "user" parameters here because they aren't // inserted by the SSABuilder. 
- let user_param_count = &mut self.func_ctx.ebbs[ebb].user_param_count; + let user_param_count = &mut self.func_ctx.blocks[block].user_param_count; for argtyp in &self.func.signature.returns { *user_param_count += 1; - self.func.dfg.append_ebb_param(ebb, argtyp.value_type); + self.func.dfg.append_block_param(block, argtyp.value_type); } } @@ -463,19 +463,18 @@ impl<'a> FunctionBuilder<'a> { /// resets the state of the `FunctionBuilder` in preparation to be used /// for another function. pub fn finalize(&mut self) { - // Check that all the `Ebb`s are filled and sealed. + // Check that all the `Block`s are filled and sealed. debug_assert!( - self.func_ctx - .ebbs - .iter() - .all(|(ebb, ebb_data)| ebb_data.pristine || self.func_ctx.ssa.is_sealed(ebb)), + self.func_ctx.blocks.iter().all( + |(block, block_data)| block_data.pristine || self.func_ctx.ssa.is_sealed(block) + ), "all blocks should be sealed before dropping a FunctionBuilder" ); debug_assert!( self.func_ctx - .ebbs + .blocks .values() - .all(|ebb_data| ebb_data.pristine || ebb_data.filled), + .all(|block_data| block_data.pristine || block_data.filled), "all blocks should be filled before dropping a FunctionBuilder" ); @@ -483,10 +482,10 @@ impl<'a> FunctionBuilder<'a> { #[cfg(debug_assertions)] { // Iterate manually to provide more helpful error messages. - for ebb in self.func_ctx.ebbs.keys() { - if let Err((inst, _msg)) = self.func.is_ebb_basic(ebb) { + for block in self.func_ctx.blocks.keys() { + if let Err((inst, _msg)) = self.func.is_block_basic(block) { let inst_str = self.func.dfg.display_inst(inst, None); - panic!("{} failed basic block invariants on {}", ebb, inst_str); + panic!("{} failed basic block invariants on {}", block, inst_str); } } } @@ -507,10 +506,10 @@ impl<'a> FunctionBuilder<'a> { /// function. The functions below help you inspect the function you're creating and modify it /// in ways that can be unsafe if used incorrectly. impl<'a> FunctionBuilder<'a> { - /// Retrieves all the parameters for an `Ebb` currently inferred from the jump instructions + /// Retrieves all the parameters for a `Block` currently inferred from the jump instructions /// inserted that target it and the SSA construction. - pub fn ebb_params(&self, ebb: Ebb) -> &[Value] { - self.func.dfg.ebb_params(ebb) + pub fn block_params(&self, block: Block) -> &[Value] { + self.func.dfg.block_params(block) } /// Retrieves the signature with reference `sigref` previously added with `import_signature`. @@ -518,22 +517,22 @@ impl<'a> FunctionBuilder<'a> { self.func.dfg.signatures.get(sigref) } - /// Creates a parameter for a specific `Ebb` by appending it to the list of already existing + /// Creates a parameter for a specific `Block` by appending it to the list of already existing /// parameters. /// - /// **Note:** this function has to be called at the creation of the `Ebb` before adding + /// **Note:** this function has to be called at the creation of the `Block` before adding /// instructions to it, otherwise this could interfere with SSA construction. 
- pub fn append_ebb_param(&mut self, ebb: Ebb, ty: Type) -> Value { + pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value { debug_assert!( - self.func_ctx.ebbs[ebb].pristine, - "You can't add EBB parameters after adding any instruction" + self.func_ctx.blocks[block].pristine, + "You can't add block parameters after adding any instruction" ); debug_assert_eq!( - self.func_ctx.ebbs[ebb].user_param_count, - self.func.dfg.num_ebb_params(ebb) + self.func_ctx.blocks[block].user_param_count, + self.func.dfg.num_block_params(block) ); - self.func_ctx.ebbs[ebb].user_param_count += 1; - self.func.dfg.append_ebb_param(ebb, ty) + self.func_ctx.blocks[block].user_param_count += 1; + self.func.dfg.append_block_param(block, ty) } /// Returns the result values of an instruction. @@ -545,43 +544,43 @@ impl<'a> FunctionBuilder<'a> { /// /// **Note:** You are responsible for maintaining the coherence with the arguments of /// other jump instructions. - pub fn change_jump_destination(&mut self, inst: Inst, new_dest: Ebb) { + pub fn change_jump_destination(&mut self, inst: Inst, new_dest: Block) { let old_dest = self.func.dfg[inst] .branch_destination_mut() .expect("you want to change the jump destination of a non-jump instruction"); - let pred = self.func_ctx.ssa.remove_ebb_predecessor(*old_dest, inst); + let pred = self.func_ctx.ssa.remove_block_predecessor(*old_dest, inst); *old_dest = new_dest; self.func_ctx .ssa - .declare_ebb_predecessor(new_dest, pred, inst); + .declare_block_predecessor(new_dest, pred, inst); } - /// Returns `true` if and only if the current `Ebb` is sealed and has no predecessors declared. + /// Returns `true` if and only if the current `Block` is sealed and has no predecessors declared. /// /// The entry block of a function is never unreachable. pub fn is_unreachable(&self) -> bool { let is_entry = match self.func.layout.entry_block() { None => false, - Some(entry) => self.position.ebb.unwrap() == entry, + Some(entry) => self.position.block.unwrap() == entry, }; !is_entry - && self.func_ctx.ssa.is_sealed(self.position.ebb.unwrap()) + && self.func_ctx.ssa.is_sealed(self.position.block.unwrap()) && !self .func_ctx .ssa - .has_any_predecessors(self.position.ebb.unwrap()) + .has_any_predecessors(self.position.block.unwrap()) } /// Returns `true` if and only if no instructions have been added since the last call to /// `switch_to_block`. pub fn is_pristine(&self) -> bool { - self.func_ctx.ebbs[self.position.ebb.unwrap()].pristine + self.func_ctx.blocks[self.position.block.unwrap()].pristine } /// Returns `true` if and only if a terminator instruction has been inserted since the /// last call to `switch_to_block`. pub fn is_filled(&self) -> bool { - self.func_ctx.ebbs[self.position.ebb.unwrap()].filled + self.func_ctx.blocks[self.position.block.unwrap()].filled } /// Returns a displayable object for the function as it is. @@ -860,29 +859,29 @@ impl<'a> FunctionBuilder<'a> { self.position.basic_block = PackedOption::from( self.func_ctx .ssa - .declare_ebb_body_block(self.position.basic_block.unwrap()), + .declare_block_body_block(self.position.basic_block.unwrap()), ); } - /// An Ebb is 'filled' when a terminator instruction is present. + /// A Block is 'filled' when a terminator instruction is present.
fn fill_current_block(&mut self) { - self.func_ctx.ebbs[self.position.ebb.unwrap()].filled = true; + self.func_ctx.blocks[self.position.block.unwrap()].filled = true; } - fn declare_successor(&mut self, dest_ebb: Ebb, jump_inst: Inst) { - self.func_ctx.ssa.declare_ebb_predecessor( - dest_ebb, + fn declare_successor(&mut self, dest_block: Block, jump_inst: Inst) { + self.func_ctx.ssa.declare_block_predecessor( + dest_block, self.position.basic_block.unwrap(), jump_inst, ); } fn handle_ssa_side_effects(&mut self, side_effects: SideEffects) { - for split_ebb in side_effects.split_ebbs_created { - self.func_ctx.ebbs[split_ebb].filled = true + for split_block in side_effects.split_blocks_created { + self.func_ctx.blocks[split_block].filled = true } - for modified_ebb in side_effects.instructions_added_to_ebbs { - self.func_ctx.ebbs[modified_ebb].pristine = false + for modified_block in side_effects.instructions_added_to_blocks { + self.func_ctx.blocks[modified_block].pristine = false } } } @@ -910,24 +909,24 @@ mod tests { { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); - let block1 = builder.create_ebb(); - let block2 = builder.create_ebb(); - let block3 = builder.create_ebb(); + let block0 = builder.create_block(); + let block1 = builder.create_block(); + let block2 = builder.create_block(); + let block3 = builder.create_block(); let x = Variable::new(0); let y = Variable::new(1); let z = Variable::new(2); builder.declare_var(x, I32); builder.declare_var(y, I32); builder.declare_var(z, I32); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); if !lazy_seal { builder.seal_block(block0); } { - let tmp = builder.ebb_params(block0)[0]; // the first function parameter + let tmp = builder.block_params(block0)[0]; // the first function parameter builder.def_var(x, tmp); } { @@ -1033,14 +1032,14 @@ mod tests { { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); + let block0 = builder.create_block(); let x = Variable::new(0); let y = Variable::new(1); let z = Variable::new(2); builder.declare_var(x, target.pointer_type()); builder.declare_var(y, target.pointer_type()); builder.declare_var(z, I32); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); let src = builder.use_var(x); @@ -1059,7 +1058,7 @@ mod tests { sig0 = (i32, i32, i32) system_v fn0 = %Memcpy sig0 -ebb0: +block0: v3 = iconst.i32 0 v1 -> v3 v2 = iconst.i32 0 @@ -1094,12 +1093,12 @@ ebb0: { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); + let block0 = builder.create_block(); let x = Variable::new(0); let y = Variable::new(16); builder.declare_var(x, target.pointer_type()); builder.declare_var(y, target.pointer_type()); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); let src = builder.use_var(x); @@ -1115,7 +1114,7 @@ ebb0: assert_eq!( func.display(None).to_string(), "function %sample() -> i32 system_v { -ebb0: +block0: v4 = iconst.i32 0 v1 -> v4 v3 = iconst.i32 0 @@ -1151,12 +1150,12 @@ ebb0: { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); + let block0 = builder.create_block(); let x = Variable::new(0); let y = Variable::new(16); 
builder.declare_var(x, target.pointer_type()); builder.declare_var(y, target.pointer_type()); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); let src = builder.use_var(x); @@ -1175,7 +1174,7 @@ ebb0: sig0 = (i32, i32, i32) system_v fn0 = %Memcpy sig0 -ebb0: +block0: v4 = iconst.i32 0 v1 -> v4 v3 = iconst.i32 0 @@ -1211,10 +1210,10 @@ ebb0: { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); + let block0 = builder.create_block(); let y = Variable::new(16); builder.declare_var(y, target.pointer_type()); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); let dest = builder.use_var(y); @@ -1229,7 +1228,7 @@ ebb0: assert_eq!( func.display(None).to_string(), "function %sample() -> i32 system_v { -ebb0: +block0: v2 = iconst.i32 0 v0 -> v2 v1 = iconst.i64 0x0001_0001_0101 @@ -1263,10 +1262,10 @@ ebb0: { let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx); - let block0 = builder.create_ebb(); + let block0 = builder.create_block(); let y = Variable::new(16); builder.declare_var(y, target.pointer_type()); - builder.append_ebb_params_for_function_params(block0); + builder.append_block_params_for_function_params(block0); builder.switch_to_block(block0); let dest = builder.use_var(y); @@ -1284,7 +1283,7 @@ ebb0: sig0 = (i32, i32, i32) system_v fn0 = %Memset sig0 -ebb0: +block0: v4 = iconst.i32 0 v0 -> v4 v1 = iconst.i8 1 diff --git a/cranelift/frontend/src/lib.rs b/cranelift/frontend/src/lib.rs index d6d63381ce..d28cb53cdf 100644 --- a/cranelift/frontend/src/lib.rs +++ b/cranelift/frontend/src/lib.rs @@ -83,22 +83,22 @@ //! { //! let mut builder = FunctionBuilder::new(&mut func, &mut fn_builder_ctx); //! -//! let block0 = builder.create_ebb(); -//! let block1 = builder.create_ebb(); -//! let block2 = builder.create_ebb(); -//! let block3 = builder.create_ebb(); +//! let block0 = builder.create_block(); +//! let block1 = builder.create_block(); +//! let block2 = builder.create_block(); +//! let block3 = builder.create_block(); //! let x = Variable::new(0); //! let y = Variable::new(1); //! let z = Variable::new(2); //! builder.declare_var(x, I32); //! builder.declare_var(y, I32); //! builder.declare_var(z, I32); -//! builder.append_ebb_params_for_function_params(block0); +//! builder.append_block_params_for_function_params(block0); //! //! builder.switch_to_block(block0); //! builder.seal_block(block0); //! { -//! let tmp = builder.ebb_params(block0)[0]; // the first function parameter +//! let tmp = builder.block_params(block0)[0]; // the first function parameter //! builder.def_var(x, tmp); //! } //! 
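For reference, the post-rename builder flow that these hunks and the `lib.rs` example exercise can be sketched end to end. This is an illustrative sketch only, assuming the `cranelift-frontend`/`cranelift-codegen` APIs as of this change; the add-one function it builds is hypothetical and not part of the patch:

```rust
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::types::I32;
use cranelift_codegen::ir::{AbiParam, ExternalName, Function, InstBuilder, Signature};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};

fn main() {
    let mut sig = Signature::new(CallConv::SystemV);
    sig.params.push(AbiParam::new(I32));
    sig.returns.push(AbiParam::new(I32));
    let mut fn_ctx = FunctionBuilderContext::new();
    let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
    {
        let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
        let block0 = builder.create_block(); // was create_ebb()
        builder.append_block_params_for_function_params(block0); // was the `ebb` spelling
        builder.switch_to_block(block0);
        builder.seal_block(block0); // the entry block has no predecessors

        let x = Variable::new(0);
        builder.declare_var(x, I32);
        let arg = builder.block_params(block0)[0]; // was ebb_params()
        builder.def_var(x, arg);

        let one = builder.ins().iconst(I32, 1);
        let v = builder.use_var(x);
        let sum = builder.ins().iadd(v, one);
        builder.ins().return_(&[sum]);
        builder.finalize();
    }
    println!("{}", func.display(None));
}
```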
//! { diff --git a/cranelift/frontend/src/ssa.rs b/cranelift/frontend/src/ssa.rs index c24e373677..7d36d9b1c7 100644 --- a/cranelift/frontend/src/ssa.rs +++ b/cranelift/frontend/src/ssa.rs @@ -16,7 +16,7 @@ use cranelift_codegen::entity::{EntityRef, PrimaryMap, SecondaryMap}; use cranelift_codegen::ir::immediates::{Ieee32, Ieee64}; use cranelift_codegen::ir::instructions::BranchInfo; use cranelift_codegen::ir::types::{F32, F64}; -use cranelift_codegen::ir::{Ebb, Function, Inst, InstBuilder, InstructionData, Type, Value}; +use cranelift_codegen::ir::{Block, Function, Inst, InstBuilder, InstructionData, Type, Value}; use cranelift_codegen::packed_option::PackedOption; use cranelift_codegen::packed_option::ReservedValue; use smallvec::SmallVec; @@ -27,7 +27,7 @@ use smallvec::SmallVec; /// non-SSA language you're translating from. /// /// The SSA building relies on information about the variables used and defined, as well as -/// their position relative to basic blocks which are stricter than extended basic blocks since +/// their position relative to basic blocks, which are stricter than `Block`s since /// they don't allow branching in the middle of them. /// /// This SSA building module allows you to def and use variables on the fly while you are @@ -40,14 +40,14 @@ pub struct SSABuilder { // TODO: Consider a sparse representation rather than SecondaryMap-of-SecondaryMap. /// Records for every variable and for every relevant block, the last definition of /// the variable in the block. - variables: SecondaryMap<Variable, SecondaryMap<Block, PackedOption<Value>>>, + variables: SecondaryMap<Variable, SecondaryMap<SSABlock, PackedOption<Value>>>, /// Records the position of the basic blocks and the list of values used but not defined in the /// block. - blocks: PrimaryMap<Block, BlockData>, + ssa_blocks: PrimaryMap<SSABlock, SSABlockData>, - /// Records the basic blocks at the beginning of the `Ebb`s. - ebb_headers: SecondaryMap<Ebb, PackedOption<Block>>, + /// Records the basic blocks at the beginning of the `Block`s. + block_headers: SecondaryMap<Block, PackedOption<SSABlock>>, /// Call stack for use in the `use_var`/`predecessors_lookup` state machine. calls: Vec<Call>, @@ -58,54 +58,54 @@ pub struct SSABuilder { side_effects: SideEffects, } -/// Side effects of a `use_var` or a `seal_ebb_header_block` method call. +/// Side effects of a `use_var` or a `seal_block_header_block` method call. pub struct SideEffects { /// When we want to append jump arguments to a `br_table` instruction, the critical edge is - /// splitted and the newly created `Ebb`s are signaled here. - pub split_ebbs_created: Vec<Ebb>, + /// split and the newly created `Block`s are signaled here. + pub split_blocks_created: Vec<Block>, /// When a variable is used but has never been defined before (this happens in the case of - /// unreachable code), a placeholder `iconst` or `fconst` value is added to the right `Ebb`. - /// This field signals if it is the case and return the `Ebb` to which the initialization has + /// unreachable code), a placeholder `iconst` or `fconst` value is added to the right `Block`. + /// This field signals if it is the case and returns the `Block` to which the initialization has /// been added.
- pub instructions_added_to_ebbs: Vec<Ebb>, + pub instructions_added_to_blocks: Vec<Block>, } impl SideEffects { fn new() -> Self { Self { - split_ebbs_created: Vec::new(), - instructions_added_to_ebbs: Vec::new(), + split_blocks_created: Vec::new(), + instructions_added_to_blocks: Vec::new(), } } fn is_empty(&self) -> bool { - self.split_ebbs_created.is_empty() && self.instructions_added_to_ebbs.is_empty() + self.split_blocks_created.is_empty() && self.instructions_added_to_blocks.is_empty() } } /// Describes the current position of a basic block in the control flow graph. -enum BlockData { - /// A block at the top of an `Ebb`. - EbbHeader(EbbHeaderBlockData), - /// A block inside an `Ebb` with an unique other block as its predecessor. +enum SSABlockData { + /// A block at the top of a `Block`. + BlockHeader(BlockHeaderSSABlockData), + /// A block inside a `Block` with a unique other block as its predecessor. /// The block is implicitly sealed at creation. - EbbBody { predecessor: Block }, + BlockBody { ssa_pred: SSABlock }, } -impl BlockData { - fn add_predecessor(&mut self, pred: Block, inst: Inst) { +impl SSABlockData { + fn add_predecessor(&mut self, ssa_pred: SSABlock, inst: Inst) { match *self { - Self::EbbBody { .. } => panic!("you can't add a predecessor to a body block"), - Self::EbbHeader(ref mut data) => { + Self::BlockBody { .. } => panic!("you can't add a predecessor to a body block"), + Self::BlockHeader(ref mut data) => { debug_assert!(!data.sealed, "sealed blocks cannot accept new predecessors"); - data.predecessors.push(PredBlock::new(pred, inst)); + data.predecessors.push(PredBlock::new(ssa_pred, inst)); } } } - fn remove_predecessor(&mut self, inst: Inst) -> Block { + fn remove_predecessor(&mut self, inst: Inst) -> SSABlock { match *self { - Self::EbbBody { .. } => panic!("should not happen"), - Self::EbbHeader(ref mut data) => { + Self::BlockBody { .. } => panic!("should not happen"), + Self::BlockHeader(ref mut data) => { // This is a linear complexity operation but the number of predecessors is low // in all non-pathological cases let pred: usize = data .predecessors .iter() .position(|&PredBlock { branch, .. }| branch == inst) .expect("the predecessor you are trying to remove is not declared"); - data.predecessors.swap_remove(pred).block + data.predecessors.swap_remove(pred).ssa_block } } } } struct PredBlock { - block: Block, + ssa_block: SSABlock, branch: Inst, } impl PredBlock { - fn new(block: Block, branch: Inst) -> Self { - Self { block, branch } + fn new(ssa_block: SSABlock, branch: Inst) -> Self { + Self { ssa_block, branch } } } type PredBlockSmallVec = SmallVec<[PredBlock; 4]>; -struct EbbHeaderBlockData { - // The predecessors of the Ebb header block, with the block and branch instruction. +struct BlockHeaderSSABlockData { + // The predecessors of the Block header block, with the block and branch instruction. predecessors: PredBlockSmallVec, - // A ebb header block is sealed if all of its predecessors have been declared. + // A block header block is sealed if all of its predecessors have been declared. sealed: bool, - // The ebb which this block is part of. - ebb: Ebb, - // List of current Ebb arguments for which an earlier def has not been found yet. + // The block which this block is part of. + block: Block, + // List of current Block arguments for which an earlier def has not been found yet. undef_variables: Vec<(Variable, Value)>, } /// An opaque reference to a basic block.
#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub struct Block(u32); -impl EntityRef for Block { +pub struct SSABlock(u32); +impl EntityRef for SSABlock { fn new(index: usize) -> Self { debug_assert!(index < (u32::MAX as usize)); Self(index as u32) @@ -157,7 +157,7 @@ impl EntityRef for Block { } } -impl ReservedValue for Block { +impl ReservedValue for SSABlock { fn reserved_value() -> Self { Self(u32::MAX) } @@ -168,8 +168,8 @@ impl SSABuilder { pub fn new() -> Self { Self { variables: SecondaryMap::with_default(SecondaryMap::new()), - blocks: PrimaryMap::new(), - ebb_headers: SecondaryMap::new(), + ssa_blocks: PrimaryMap::new(), + block_headers: SecondaryMap::new(), calls: Vec::new(), results: Vec::new(), side_effects: SideEffects::new(), @@ -180,8 +180,8 @@ impl SSABuilder { /// deallocating memory. pub fn clear(&mut self) { self.variables.clear(); - self.blocks.clear(); - self.ebb_headers.clear(); + self.ssa_blocks.clear(); + self.block_headers.clear(); debug_assert!(self.calls.is_empty()); debug_assert!(self.results.is_empty()); debug_assert!(self.side_effects.is_empty()); @@ -190,8 +190,8 @@ impl SSABuilder { /// Tests whether an `SSABuilder` is in a cleared state. pub fn is_empty(&self) -> bool { self.variables.is_empty() - && self.blocks.is_empty() - && self.ebb_headers.is_empty() + && self.ssa_blocks.is_empty() + && self.block_headers.is_empty() && self.calls.is_empty() && self.results.is_empty() && self.side_effects.is_empty() @@ -210,15 +210,15 @@ enum ZeroOneOrMore { #[derive(Debug)] enum UseVarCases { Unsealed(Value), - SealedOnePredecessor(Block), - SealedMultiplePredecessors(Value, Ebb), + SealedOnePredecessor(SSABlock), + SealedMultiplePredecessors(Value, Block), } /// States for the `use_var`/`predecessors_lookup` state machine. enum Call { - UseVar(Block), - FinishSealedOnePredecessor(Block), - FinishPredecessorsLookup(Value, Ebb), + UseVar(SSABlock), + FinishSealedOnePredecessor(SSABlock), + FinishPredecessorsLookup(Value, Block), } /// Emit instructions to produce a zero value in the given type. @@ -257,7 +257,7 @@ fn emit_zero(ty: Type, mut cur: FuncCursor) -> Value { /// translating to Cranelift IR: /// /// - for each sequence of contiguous instructions (with no branches), create a corresponding -/// basic block with `declare_ebb_body_block` or `declare_ebb_header_block` depending on the +/// basic block with `declare_block_body_block` or `declare_block_header_block` depending on the /// position of the basic block; /// /// - while traversing a basic block and translating instruction, use `def_var` and `use_var` @@ -265,25 +265,25 @@ fn emit_zero(ty: Type, mut cur: FuncCursor) -> Value { /// SSA values; /// /// - when all the instructions in a basic block have translated, the block is said _filled_ and -/// only then you can add it as a predecessor to other blocks with `declare_ebb_predecessor`; +/// only then you can add it as a predecessor to other blocks with `declare_block_predecessor`; /// -/// - when you have constructed all the predecessor to a basic block at the beginning of an `Ebb`, -/// call `seal_ebb_header_block` on it with the `Function` that you are building. +/// - when you have constructed all the predecessor to a basic block at the beginning of a `Block`, +/// call `seal_block_header_block` on it with the `Function` that you are building. 
/// /// This API will give you the correct SSA values to use as arguments of your instructions, -/// as well as modify the jump instruction and `Ebb` headers parameters to account for the SSA +/// as well as modify the jump instruction and `Block` header parameters to account for the SSA /// Phi functions. /// impl SSABuilder { /// Declares a new definition of a variable in a given basic block. /// The SSA value is passed as an argument because it should be created with /// `ir::DataFlowGraph::append_result`. - pub fn def_var(&mut self, var: Variable, val: Value, block: Block) { - self.variables[var][block] = PackedOption::from(val); + pub fn def_var(&mut self, var: Variable, val: Value, ssa_block: SSABlock) { + self.variables[var][ssa_block] = PackedOption::from(val); } /// Declares a use of a variable in a given basic block. Returns the SSA value corresponding - /// to the current SSA definition of this variable and a list of newly created Ebbs that + /// to the current SSA definition of this variable and a list of newly created Blocks that /// are the results of critical edge splitting for `br_table` with arguments. /// /// If the variable has never been defined in this block or recursively in its predecessors, /// this method will silently create an initializer with `iconst` or `fconst`. You are /// responsible for making sure that this initialization is valid. pub fn use_var( &mut self, func: &mut Function, var: Variable, ty: Type, - block: Block, + ssa_block: SSABlock, ) -> (Value, SideEffects) { // First, try Local Value Numbering (Algorithm 1 in the paper). // If the variable already has a known Value in this block, use that. if let Some(var_defs) = self.variables.get(var) { - if let Some(val) = var_defs[block].expand() { + if let Some(val) = var_defs[ssa_block].expand() { return (val, SideEffects::new()); } } @@ -311,7 +311,7 @@ impl SSABuilder { debug_assert!(self.side_effects.is_empty()); // Prepare the 'calls' and 'results' stacks for the state machine. - self.use_var_nonlocal(func, var, ty, block); + self.use_var_nonlocal(func, var, ty, ssa_block); let value = self.run_state_machine(func, var, ty); let side_effects = mem::replace(&mut self.side_effects, SideEffects::new()); @@ -322,182 +322,190 @@ impl SSABuilder { /// Resolve the minimal SSA Value of `var` in `block` by traversing predecessors. /// /// This function sets up state for `run_state_machine()` but does not execute it. - fn use_var_nonlocal(&mut self, func: &mut Function, var: Variable, ty: Type, block: Block) { + fn use_var_nonlocal( + &mut self, + func: &mut Function, + var: Variable, + ty: Type, + ssa_block: SSABlock, + ) { // This function is split into two parts to appease the borrow checker. // Part 1: With a mutable borrow of self, update the DataFlowGraph if necessary. - let case = match self.blocks[block] { - BlockData::EbbHeader(ref mut data) => { - // The block has multiple predecessors so we append an Ebb parameter that + let case = match self.ssa_blocks[ssa_block] { + SSABlockData::BlockHeader(ref mut data) => { + // The block has multiple predecessors so we append a Block parameter that // will serve as a value. if data.sealed { if data.predecessors.len() == 1 { // Optimize the common case of one predecessor: no param needed. - UseVarCases::SealedOnePredecessor(data.predecessors[0].block) + UseVarCases::SealedOnePredecessor(data.predecessors[0].ssa_block) } else { // Break potential cycles by eagerly adding an operandless param.
- let val = func.dfg.append_ebb_param(data.ebb, ty); - UseVarCases::SealedMultiplePredecessors(val, data.ebb) + let val = func.dfg.append_block_param(data.block, ty); + UseVarCases::SealedMultiplePredecessors(val, data.block) } } else { - let val = func.dfg.append_ebb_param(data.ebb, ty); + let val = func.dfg.append_block_param(data.block, ty); data.undef_variables.push((var, val)); UseVarCases::Unsealed(val) } } - BlockData::EbbBody { predecessor: pred } => UseVarCases::SealedOnePredecessor(pred), + SSABlockData::BlockBody { ssa_pred } => UseVarCases::SealedOnePredecessor(ssa_pred), }; // Part 2: Prepare SSABuilder state for run_state_machine(). match case { UseVarCases::SealedOnePredecessor(pred) => { // Get the Value directly from the single predecessor. - self.calls.push(Call::FinishSealedOnePredecessor(block)); + self.calls.push(Call::FinishSealedOnePredecessor(ssa_block)); self.calls.push(Call::UseVar(pred)); } UseVarCases::Unsealed(val) => { // Define the operandless param added above to prevent lookup cycles. - self.def_var(var, val, block); + self.def_var(var, val, ssa_block); // Nothing more can be known at this point. self.results.push(val); } - UseVarCases::SealedMultiplePredecessors(val, ebb) => { + UseVarCases::SealedMultiplePredecessors(val, block) => { // Define the operandless param added above to prevent lookup cycles. - self.def_var(var, val, block); + self.def_var(var, val, ssa_block); // Look up a use_var for each precessor. - self.begin_predecessors_lookup(val, ebb); + self.begin_predecessors_lookup(val, block); } } } /// For blocks with a single predecessor, once we've determined the value, /// record a local def for it for future queries to find. - fn finish_sealed_one_predecessor(&mut self, var: Variable, block: Block) { + fn finish_sealed_one_predecessor(&mut self, var: Variable, ssa_block: SSABlock) { let val = *self.results.last().unwrap(); - self.def_var(var, val, block); + self.def_var(var, val, ssa_block); } - /// Declares a new basic block belonging to the body of a certain `Ebb` and having `pred` + /// Declares a new basic block belonging to the body of a certain `Block` and having `pred` /// as a predecessor. `pred` is the only predecessor of the block and the block is sealed /// at creation. /// - /// To declare a `Ebb` header block, see `declare_ebb_header_block`. - pub fn declare_ebb_body_block(&mut self, pred: Block) -> Block { - self.blocks.push(BlockData::EbbBody { predecessor: pred }) + /// To declare a `Block` header block, see `declare_block_header_block`. + pub fn declare_block_body_block(&mut self, ssa_pred: SSABlock) -> SSABlock { + self.ssa_blocks.push(SSABlockData::BlockBody { ssa_pred }) } - /// Declares a new basic block at the beginning of an `Ebb`. No predecessors are declared + /// Declares a new basic block at the beginning of a `Block`. No predecessors are declared /// here and the block is not sealed. - /// Predecessors have to be added with `declare_ebb_predecessor`. - pub fn declare_ebb_header_block(&mut self, ebb: Ebb) -> Block { - let block = self.blocks.push(BlockData::EbbHeader(EbbHeaderBlockData { - predecessors: PredBlockSmallVec::new(), - sealed: false, - ebb, - undef_variables: Vec::new(), - })); - self.ebb_headers[ebb] = block.into(); - block + /// Predecessors have to be added with `declare_block_predecessor`. 
+ pub fn declare_block_header_block(&mut self, block: Block) -> SSABlock { + let ssa_block = self + .ssa_blocks + .push(SSABlockData::BlockHeader(BlockHeaderSSABlockData { + predecessors: PredBlockSmallVec::new(), + sealed: false, + block, + undef_variables: Vec::new(), + })); + self.block_headers[block] = ssa_block.into(); + ssa_block } - /// Gets the header block corresponding to an Ebb, panics if the Ebb or the header block + /// Gets the header block corresponding to a Block, panics if the Block or the header block /// isn't declared. - pub fn header_block(&self, ebb: Ebb) -> Block { - self.ebb_headers - .get(ebb) - .expect("the ebb has not been declared") + pub fn header_block(&self, block: Block) -> SSABlock { + self.block_headers + .get(block) + .expect("the block has not been declared") .expand() .expect("the header block has not been defined") } - /// Declares a new predecessor for an `Ebb` header block and record the branch instruction + /// Declares a new predecessor for a `Block` header block and record the branch instruction /// of the predecessor that leads to it. /// - /// Note that the predecessor is a `Block` and not an `Ebb`. This `Block` must be filled + /// Note that the predecessor is an `SSABlock` and not a `Block`. This `SSABlock` must be filled /// before added as predecessor. Note that you must provide no jump arguments to the branch /// instruction when you create it since `SSABuilder` will fill them for you. /// /// Callers are expected to avoid adding the same predecessor more than once in the case /// of a jump table. - pub fn declare_ebb_predecessor(&mut self, ebb: Ebb, pred: Block, inst: Inst) { - debug_assert!(!self.is_sealed(ebb)); - let header_block = self.header_block(ebb); - self.blocks[header_block].add_predecessor(pred, inst) + pub fn declare_block_predecessor(&mut self, block: Block, ssa_pred: SSABlock, inst: Inst) { + debug_assert!(!self.is_sealed(block)); + let header_block = self.header_block(block); + self.ssa_blocks[header_block].add_predecessor(ssa_pred, inst) } - /// Remove a previously declared Ebb predecessor by giving a reference to the jump + /// Remove a previously declared Block predecessor by giving a reference to the jump /// instruction. Returns the basic block containing the instruction. /// /// Note: use only when you know what you are doing, this might break the SSA building problem - pub fn remove_ebb_predecessor(&mut self, ebb: Ebb, inst: Inst) -> Block { - debug_assert!(!self.is_sealed(ebb)); - let header_block = self.header_block(ebb); - self.blocks[header_block].remove_predecessor(inst) + pub fn remove_block_predecessor(&mut self, block: Block, inst: Inst) -> SSABlock { + debug_assert!(!self.is_sealed(block)); + let header_block = self.header_block(block); + self.ssa_blocks[header_block].remove_predecessor(inst) } - /// Completes the global value numbering for an `Ebb`, all of its predecessors having been + /// Completes the global value numbering for a `Block`, all of its predecessors having been /// already sealed. /// - /// This method modifies the function's `Layout` by adding arguments to the `Ebb`s to + /// This method modifies the function's `Layout` by adding arguments to the `Block`s to /// take into account the Phi function placed by the SSA algorithm. /// - /// Returns the list of newly created ebbs for critical edge splitting.
- pub fn seal_ebb_header_block(&mut self, ebb: Ebb, func: &mut Function) -> SideEffects { - self.seal_one_ebb_header_block(ebb, func); + /// Returns the list of newly created blocks for critical edge splitting. + pub fn seal_block_header_block(&mut self, block: Block, func: &mut Function) -> SideEffects { + self.seal_one_block_header_block(block, func); mem::replace(&mut self.side_effects, SideEffects::new()) } - /// Completes the global value numbering for all `Ebb`s in `func`. + /// Completes the global value numbering for all `Block`s in `func`. /// - /// It's more efficient to seal `Ebb`s as soon as possible, during + /// It's more efficient to seal `Block`s as soon as possible, during /// translation, but for frontends where this is impractical to do, this /// function can be used at the end of translating all blocks to ensure /// that everything is sealed. - pub fn seal_all_ebb_header_blocks(&mut self, func: &mut Function) -> SideEffects { - // Seal all `Ebb`s currently in the function. This can entail splitting + pub fn seal_all_block_header_blocks(&mut self, func: &mut Function) -> SideEffects { + // Seal all `Block`s currently in the function. This can entail splitting // and creation of new blocks, however such new blocks are sealed on // the fly, so we don't need to account for them here. - for ebb in self.ebb_headers.keys() { - self.seal_one_ebb_header_block(ebb, func); + for block in self.block_headers.keys() { + self.seal_one_block_header_block(block, func); } mem::replace(&mut self.side_effects, SideEffects::new()) } - /// Helper function for `seal_ebb_header_block` and - /// `seal_all_ebb_header_blocks`. - fn seal_one_ebb_header_block(&mut self, ebb: Ebb, func: &mut Function) { - let block = self.header_block(ebb); + /// Helper function for `seal_block_header_block` and + /// `seal_all_block_header_blocks`. + fn seal_one_block_header_block(&mut self, block: Block, func: &mut Function) { + let ssa_block = self.header_block(block); - let undef_vars = match self.blocks[block] { - BlockData::EbbBody { .. } => panic!("this should not happen"), - BlockData::EbbHeader(ref mut data) => { + let undef_vars = match self.ssa_blocks[ssa_block] { + SSABlockData::BlockBody { .. } => panic!("this should not happen"), + SSABlockData::BlockHeader(ref mut data) => { debug_assert!( !data.sealed, "Attempting to seal {} which is already sealed.", - ebb + block ); - debug_assert_eq!(ebb, data.ebb); + debug_assert_eq!(block, data.block); // Extract the undef_variables data from the block so that we // can iterate over it without borrowing the whole builder. mem::replace(&mut data.undef_variables, Vec::new()) } }; - // For each undef var we look up values in the predecessors and create an EBB parameter + // For each undef var we look up values in the predecessors and create a block parameter // only if necessary. for (var, val) in undef_vars { let ty = func.dfg.value_type(val); - self.predecessors_lookup(func, val, var, ty, ebb); + self.predecessors_lookup(func, val, var, ty, block); } - self.mark_ebb_header_block_sealed(block); + self.mark_block_header_block_sealed(ssa_block); } /// Set the `sealed` flag for `block`. - fn mark_ebb_header_block_sealed(&mut self, block: Block) { + fn mark_block_header_block_sealed(&mut self, ssa_block: SSABlock) { // Then we mark the block as sealed. - match self.blocks[block] { - BlockData::EbbBody { .. } => panic!("this should not happen"), - BlockData::EbbHeader(ref mut data) => { + match self.ssa_blocks[ssa_block] { + SSABlockData::BlockBody { ..
} => panic!("this should not happen"), + SSABlockData::BlockHeader(ref mut data) => { debug_assert!(!data.sealed); debug_assert!(data.undef_variables.is_empty()); data.sealed = true; @@ -508,21 +516,21 @@ impl SSABuilder { } } - /// Given the local SSA Value of a Variable in an Ebb, perform a recursive lookup on + /// Given the local SSA Value of a Variable in an Block, perform a recursive lookup on /// predecessors to determine if it is redundant with another Value earlier in the CFG. /// /// If such a Value exists and is redundant, the local Value is replaced by the - /// corresponding non-local Value. If the original Value was an Ebb parameter, + /// corresponding non-local Value. If the original Value was an Block parameter, /// the parameter may be removed if redundant. Parameters are placed eagerly by callers - /// to avoid infinite loops when looking up a Value for an Ebb that is in a CFG loop. + /// to avoid infinite loops when looking up a Value for an Block that is in a CFG loop. /// - /// Doing this lookup for each Value in each Ebb preserves SSA form during construction. + /// Doing this lookup for each Value in each Block preserves SSA form during construction. /// /// Returns the chosen Value. /// /// ## Arguments /// - /// `sentinel` is a dummy Ebb parameter inserted by `use_var_nonlocal()`. + /// `sentinel` is a dummy Block parameter inserted by `use_var_nonlocal()`. /// Its purpose is to allow detection of CFG cycles while traversing predecessors. /// /// The `sentinel: Value` and the `ty: Type` are describing the `var: Variable` @@ -533,30 +541,29 @@ impl SSABuilder { sentinel: Value, var: Variable, ty: Type, - ebb: Ebb, + block: Block, ) -> Value { debug_assert!(self.calls.is_empty()); debug_assert!(self.results.is_empty()); // self.side_effects may be non-empty here so that callers can // accumulate side effects over multiple calls. - self.begin_predecessors_lookup(sentinel, ebb); + self.begin_predecessors_lookup(sentinel, block); self.run_state_machine(func, var, ty) } /// Set up state for `run_state_machine()` to initiate non-local use lookups - /// in all predecessors of `dest_ebb`, and arrange for a call to + /// in all predecessors of `dest_block`, and arrange for a call to /// `finish_predecessors_lookup` once they complete. - fn begin_predecessors_lookup(&mut self, sentinel: Value, dest_ebb: Ebb) { + fn begin_predecessors_lookup(&mut self, sentinel: Value, dest_block: Block) { self.calls - .push(Call::FinishPredecessorsLookup(sentinel, dest_ebb)); + .push(Call::FinishPredecessorsLookup(sentinel, dest_block)); // Iterate over the predecessors. let mut calls = mem::replace(&mut self.calls, Vec::new()); - calls.extend( - self.predecessors(dest_ebb) - .iter() - .rev() - .map(|&PredBlock { block: pred, .. }| Call::UseVar(pred)), - ); + calls.extend(self.predecessors(dest_block).iter().rev().map( + |&PredBlock { + ssa_block: pred, .. + }| Call::UseVar(pred), + )); self.calls = calls; } @@ -567,12 +574,12 @@ impl SSABuilder { func: &mut Function, sentinel: Value, var: Variable, - dest_ebb: Ebb, + dest_block: Block, ) { let mut pred_values: ZeroOneOrMore = ZeroOneOrMore::Zero; // Determine how many predecessors are yielding unique, non-temporary Values. 
- let num_predecessors = self.predecessors(dest_ebb).len(); + let num_predecessors = self.predecessors(dest_block).len(); for &pred_val in self.results.iter().rev().take(num_predecessors) { match pred_values { ZeroOneOrMore::Zero => { @@ -600,21 +607,23 @@ impl SSABuilder { // The variable is used but never defined before. This is an irregularity in the // code, but rather than throwing an error we silently initialize the variable to // 0. This will have no effect since this situation happens in unreachable code. - if !func.layout.is_ebb_inserted(dest_ebb) { - func.layout.append_ebb(dest_ebb); + if !func.layout.is_block_inserted(dest_block) { + func.layout.append_block(dest_block); } - self.side_effects.instructions_added_to_ebbs.push(dest_ebb); + self.side_effects + .instructions_added_to_blocks + .push(dest_block); let zero = emit_zero( func.dfg.value_type(sentinel), - FuncCursor::new(func).at_first_insertion_point(dest_ebb), + FuncCursor::new(func).at_first_insertion_point(dest_block), ); - func.dfg.remove_ebb_param(sentinel); + func.dfg.remove_block_param(sentinel); func.dfg.change_to_alias(sentinel, zero); zero } ZeroOneOrMore::One(pred_val) => { // Here all the predecessors use a single value to represent our variable - // so we don't need to have it as an ebb argument. + // so we don't need to have it as a block argument. // We need to replace all the occurrences of val with pred_val but since // we can't afford a re-writing pass right now we just declare an alias. // Resolve aliases eagerly so that we can check for cyclic aliasing, @@ -624,44 +633,44 @@ impl SSABuilder { // Cycle detected. Break it by creating a zero value. resolved = emit_zero( func.dfg.value_type(sentinel), - FuncCursor::new(func).at_first_insertion_point(dest_ebb), + FuncCursor::new(func).at_first_insertion_point(dest_block), ); } - func.dfg.remove_ebb_param(sentinel); + func.dfg.remove_block_param(sentinel); func.dfg.change_to_alias(sentinel, resolved); resolved } ZeroOneOrMore::More => { // There is disagreement in the predecessors on which value to use so we have - // to keep the ebb argument. To avoid borrowing `self` for the whole loop, + // to keep the block argument. To avoid borrowing `self` for the whole loop, // temporarily detach the predecessors list and replace it with an empty list. let mut preds = - mem::replace(self.predecessors_mut(dest_ebb), PredBlockSmallVec::new()); + mem::replace(self.predecessors_mut(dest_block), PredBlockSmallVec::new()); for &mut PredBlock { - block: ref mut pred_block, + ssa_block: ref mut pred_ssa_block, branch: ref mut last_inst, } in &mut preds { // We already did a full `use_var` above, so we can do just the fast path. - let block_map = self.variables.get(var).unwrap(); - let pred_val = block_map.get(*pred_block).unwrap().unwrap(); + let ssa_block_map = self.variables.get(var).unwrap(); + let pred_val = ssa_block_map.get(*pred_ssa_block).unwrap().unwrap(); let jump_arg = self.append_jump_argument( func, *last_inst, - *pred_block, - dest_ebb, + *pred_ssa_block, + dest_block, pred_val, var, ); - if let Some((middle_ebb, middle_block, middle_jump_inst)) = jump_arg { - *pred_block = middle_block; + if let Some((middle_block, middle_ssa_block, middle_jump_inst)) = jump_arg { + *pred_ssa_block = middle_ssa_block; *last_inst = middle_jump_inst; - self.side_effects.split_ebbs_created.push(middle_ebb); + self.side_effects.split_blocks_created.push(middle_block); } } // Now that we're done, move the predecessors list back.
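
[Editorial aside, illustration only, not part of the patch] The three `ZeroOneOrMore` arms above implement a zero/one/many fold over the values coming out of the predecessors. A standalone sketch of just that classification rule follows; the `Value` newtype and the exact sentinel handling are assumptions, since part of the hunk is elided:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Value(u32);

enum ZeroOneOrMore {
    Zero,
    One(Value),
    More,
}

fn classify(sentinel: Value, pred_vals: &[Value]) -> ZeroOneOrMore {
    let mut acc = ZeroOneOrMore::Zero;
    for &v in pred_vals {
        acc = match acc {
            // A predecessor that only sees the sentinel gives no information.
            ZeroOneOrMore::Zero if v == sentinel => ZeroOneOrMore::Zero,
            ZeroOneOrMore::Zero => ZeroOneOrMore::One(v),
            // The same non-sentinel value again keeps the `One` state.
            ZeroOneOrMore::One(w) if v == w || v == sentinel => ZeroOneOrMore::One(w),
            // Two distinct values: the block parameter must stay.
            ZeroOneOrMore::One(_) => ZeroOneOrMore::More,
            ZeroOneOrMore::More => ZeroOneOrMore::More,
        };
    }
    acc
}

For example, classify(s, &[s, v1, v1]) yields One(v1), so the parameter can be aliased away, while classify(s, &[v1, v2]) yields More and the block parameter is kept.
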
- debug_assert!(self.predecessors(dest_ebb).is_empty()); - *self.predecessors_mut(dest_ebb) = preds; + debug_assert!(self.predecessors(dest_block).is_empty()); + *self.predecessors_mut(dest_block) = preds; sentinel } @@ -670,20 +679,20 @@ impl SSABuilder { self.results.push(result_val); } - /// Appends a jump argument to a jump instruction, returns ebb created in case of + /// Appends a jump argument to a jump instruction, returns the block created in case of /// critical edge splitting. fn append_jump_argument( &mut self, func: &mut Function, jump_inst: Inst, - jump_inst_block: Block, - dest_ebb: Ebb, + jump_inst_ssa_block: SSABlock, + dest_block: Block, val: Value, var: Variable, - ) -> Option<(Ebb, Block, Inst)> { + ) -> Option<(Block, SSABlock, Inst)> { match func.dfg.analyze_branch(jump_inst) { BranchInfo::NotABranch => { - panic!("you have declared a non-branch instruction as a predecessor to an ebb"); + panic!("you have declared a non-branch instruction as a predecessor to a block"); } // For a single destination appending a jump argument to the instruction // is sufficient. @@ -691,24 +700,24 @@ impl SSABuilder { func.dfg.append_inst_arg(jump_inst, val); None } - BranchInfo::Table(jt, default_ebb) => { + BranchInfo::Table(jt, default_block) => { // In the case of a jump table, the situation is tricky because br_table doesn't // support arguments. // We have to split the critical edge - let middle_ebb = func.dfg.make_ebb(); - func.layout.append_ebb(middle_ebb); - let middle_block = self.declare_ebb_header_block(middle_ebb); - self.blocks[middle_block].add_predecessor(jump_inst_block, jump_inst); - self.mark_ebb_header_block_sealed(middle_block); + let middle_block = func.dfg.make_block(); + func.layout.append_block(middle_block); + let middle_ssa_block = self.declare_block_header_block(middle_block); + self.ssa_blocks[middle_ssa_block].add_predecessor(jump_inst_ssa_block, jump_inst); + self.mark_block_header_block_sealed(middle_ssa_block); - if let Some(default_ebb) = default_ebb { - if dest_ebb == default_ebb { + if let Some(default_block) = default_block { + if dest_block == default_block { match func.dfg[jump_inst] { InstructionData::BranchTable { destination: ref mut dest, .. } => { - *dest = middle_ebb; + *dest = middle_block; } _ => panic!("should not happen"), } @@ -716,46 +725,46 @@ impl SSABuilder { } for old_dest in func.jump_tables[jt].as_mut_slice() { - if *old_dest == dest_ebb { - *old_dest = middle_ebb; + if *old_dest == dest_block { + *old_dest = middle_block; } } - let mut cur = FuncCursor::new(func).at_bottom(middle_ebb); - let middle_jump_inst = cur.ins().jump(dest_ebb, &[val]); - self.def_var(var, val, middle_block); - Some((middle_ebb, middle_block, middle_jump_inst)) + let mut cur = FuncCursor::new(func).at_bottom(middle_block); + let middle_jump_inst = cur.ins().jump(dest_block, &[val]); + self.def_var(var, val, middle_ssa_block); + Some((middle_block, middle_ssa_block, middle_jump_inst)) } } } - /// Returns the list of `Ebb`s that have been declared as predecessors of the argument. - fn predecessors(&self, ebb: Ebb) -> &[PredBlock] { - let block = self.header_block(ebb); - match self.blocks[block] { - BlockData::EbbBody { .. } => panic!("should not happen"), - BlockData::EbbHeader(ref data) => &data.predecessors, + /// Returns the list of `Block`s that have been declared as predecessors of the argument.
+ fn predecessors(&self, block: Block) -> &[PredBlock] { + let ssa_block = self.header_block(block); + match self.ssa_blocks[ssa_block] { + SSABlockData::BlockBody { .. } => panic!("should not happen"), + SSABlockData::BlockHeader(ref data) => &data.predecessors, } } - /// Returns whether the given Ebb has any predecessor or not. - pub fn has_any_predecessors(&self, ebb: Ebb) -> bool { - !self.predecessors(ebb).is_empty() + /// Returns whether the given Block has any predecessor or not. + pub fn has_any_predecessors(&self, block: Block) -> bool { + !self.predecessors(block).is_empty() } /// Same as predecessors, but for &mut. - fn predecessors_mut(&mut self, ebb: Ebb) -> &mut PredBlockSmallVec { - let block = self.header_block(ebb); - match self.blocks[block] { - BlockData::EbbBody { .. } => panic!("should not happen"), - BlockData::EbbHeader(ref mut data) => &mut data.predecessors, + fn predecessors_mut(&mut self, block: Block) -> &mut PredBlockSmallVec { + let ssa_block = self.header_block(block); + match self.ssa_blocks[ssa_block] { + SSABlockData::BlockBody { .. } => panic!("should not happen"), + SSABlockData::BlockHeader(ref mut data) => &mut data.predecessors, } } - /// Returns `true` if and only if `seal_ebb_header_block` has been called on the argument. - pub fn is_sealed(&self, ebb: Ebb) -> bool { - match self.blocks[self.header_block(ebb)] { - BlockData::EbbBody { .. } => panic!("should not happen"), - BlockData::EbbHeader(ref data) => data.sealed, + /// Returns `true` if and only if `seal_block_header_block` has been called on the argument. + pub fn is_sealed(&self, block: Block) -> bool { + match self.ssa_blocks[self.header_block(block)] { + SSABlockData::BlockBody { .. } => panic!("should not happen"), + SSABlockData::BlockHeader(ref data) => data.sealed, } } @@ -768,21 +777,21 @@ impl SSABuilder { // Process the calls scheduled in `self.calls` until it is empty. 
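
[Editorial aside, illustration only, not part of the patch] `run_state_machine`, whose loop follows, drains an explicit stack of `Call`s instead of recursing, so deeply nested CFGs cannot overflow the machine stack. The same pattern in a self-contained sketch, with a hypothetical two-variant work item rather than the builder's real `Call` enum:

enum Call {
    Visit(u32),  // analogous to Call::UseVar(ssa_block)
    Finish(u32), // analogous to Call::FinishPredecessorsLookup(..)
}

fn run(mut calls: Vec<Call>) -> Vec<u32> {
    let mut results = Vec::new();
    // Pop until the schedule is drained; handlers push further work,
    // which is what replaces the recursive calls.
    while let Some(call) = calls.pop() {
        match call {
            Call::Visit(0) => results.push(0),
            Call::Visit(n) => {
                // "Recurse": finish `n` only after its child is done.
                calls.push(Call::Finish(n));
                calls.push(Call::Visit(n - 1));
            }
            Call::Finish(n) => results.push(n),
        }
    }
    results
}

Here run(vec![Call::Visit(3)]) yields [0, 1, 2, 3], exactly the post-order a recursive version would produce.
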
while let Some(call) = self.calls.pop() { match call { - Call::UseVar(block) => { + Call::UseVar(ssa_block) => { // First we lookup for the current definition of the variable in this block if let Some(var_defs) = self.variables.get(var) { - if let Some(val) = var_defs[block].expand() { + if let Some(val) = var_defs[ssa_block].expand() { self.results.push(val); continue; } } - self.use_var_nonlocal(func, var, ty, block); + self.use_var_nonlocal(func, var, ty, ssa_block); } - Call::FinishSealedOnePredecessor(block) => { - self.finish_sealed_one_predecessor(var, block); + Call::FinishSealedOnePredecessor(ssa_block) => { + self.finish_sealed_one_predecessor(var, ssa_block); } - Call::FinishPredecessorsLookup(sentinel, dest_ebb) => { - self.finish_predecessors_lookup(func, sentinel, var, dest_ebb); + Call::FinishPredecessorsLookup(sentinel, dest_block) => { + self.finish_predecessors_lookup(func, sentinel, var, dest_block); } } } @@ -807,124 +816,124 @@ mod tests { fn simple_block() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: // x = 1; // y = 2; // z = x + y; // z = x + z; - let block = ssa.declare_ebb_header_block(ebb0); + let ssa_block = ssa.declare_block_header_block(block0); let x_var = Variable::new(0); let x_ssa = { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); + cur.insert_block(block0); cur.ins().iconst(I32, 1) }; - ssa.def_var(x_var, x_ssa, block); + ssa.def_var(x_var, x_ssa, ssa_block); let y_var = Variable::new(1); let y_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iconst(I32, 2) }; - ssa.def_var(y_var, y_ssa, block); + ssa.def_var(y_var, y_ssa, ssa_block); - assert_eq!(ssa.use_var(&mut func, x_var, I32, block).0, x_ssa); - assert_eq!(ssa.use_var(&mut func, y_var, I32, block).0, y_ssa); + assert_eq!(ssa.use_var(&mut func, x_var, I32, ssa_block).0, x_ssa); + assert_eq!(ssa.use_var(&mut func, y_var, I32, ssa_block).0, y_ssa); let z_var = Variable::new(2); - let x_use1 = ssa.use_var(&mut func, x_var, I32, block).0; - let y_use1 = ssa.use_var(&mut func, y_var, I32, block).0; + let x_use1 = ssa.use_var(&mut func, x_var, I32, ssa_block).0; + let y_use1 = ssa.use_var(&mut func, y_var, I32, ssa_block).0; let z1_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x_use1, y_use1) }; - ssa.def_var(z_var, z1_ssa, block); - assert_eq!(ssa.use_var(&mut func, z_var, I32, block).0, z1_ssa); - let x_use2 = ssa.use_var(&mut func, x_var, I32, block).0; - let z_use1 = ssa.use_var(&mut func, z_var, I32, block).0; + ssa.def_var(z_var, z1_ssa, ssa_block); + assert_eq!(ssa.use_var(&mut func, z_var, I32, ssa_block).0, z1_ssa); + let x_use2 = ssa.use_var(&mut func, x_var, I32, ssa_block).0; + let z_use1 = ssa.use_var(&mut func, z_var, I32, ssa_block).0; let z2_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x_use2, z_use1) }; - ssa.def_var(z_var, z2_ssa, block); - assert_eq!(ssa.use_var(&mut func, z_var, I32, block).0, z2_ssa); + ssa.def_var(z_var, z2_ssa, ssa_block); + assert_eq!(ssa.use_var(&mut func, z_var, I32, ssa_block).0, z2_ssa); } #[test] fn sequence_of_blocks() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = 
func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: - // ebb0: + // block0: // x = 1; // y = 2; // z = x + y; - // brnz y, ebb1; + // brnz y, block1; // z = x + z; - // ebb1: + // block1: // y = x + y; - let block0 = ssa.declare_ebb_header_block(ebb0); + let ssa_block0 = ssa.declare_block_header_block(block0); let x_var = Variable::new(0); let x_ssa = { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.goto_bottom(block0); cur.ins().iconst(I32, 1) }; - ssa.def_var(x_var, x_ssa, block0); + ssa.def_var(x_var, x_ssa, ssa_block0); let y_var = Variable::new(1); let y_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iconst(I32, 2) }; - ssa.def_var(y_var, y_ssa, block0); - assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x_ssa); - assert_eq!(ssa.use_var(&mut func, y_var, I32, block0).0, y_ssa); + ssa.def_var(y_var, y_ssa, ssa_block0); + assert_eq!(ssa.use_var(&mut func, x_var, I32, ssa_block0).0, x_ssa); + assert_eq!(ssa.use_var(&mut func, y_var, I32, ssa_block0).0, y_ssa); let z_var = Variable::new(2); - let x_use1 = ssa.use_var(&mut func, x_var, I32, block0).0; - let y_use1 = ssa.use_var(&mut func, y_var, I32, block0).0; + let x_use1 = ssa.use_var(&mut func, x_var, I32, ssa_block0).0; + let y_use1 = ssa.use_var(&mut func, y_var, I32, ssa_block0).0; let z1_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x_use1, y_use1) }; - ssa.def_var(z_var, z1_ssa, block0); - assert_eq!(ssa.use_var(&mut func, z_var, I32, block0).0, z1_ssa); - let y_use2 = ssa.use_var(&mut func, y_var, I32, block0).0; + ssa.def_var(z_var, z1_ssa, ssa_block0); + assert_eq!(ssa.use_var(&mut func, z_var, I32, ssa_block0).0, z1_ssa); + let y_use2 = ssa.use_var(&mut func, y_var, I32, ssa_block0).0; let jump_inst: Inst = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); - cur.ins().brnz(y_use2, ebb1, &[]) + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); + cur.ins().brnz(y_use2, block1, &[]) }; - let block1 = ssa.declare_ebb_body_block(block0); - let x_use2 = ssa.use_var(&mut func, x_var, I32, block1).0; + let ssa_block1 = ssa.declare_block_body_block(ssa_block0); + let x_use2 = ssa.use_var(&mut func, x_var, I32, ssa_block1).0; assert_eq!(x_use2, x_ssa); - let z_use1 = ssa.use_var(&mut func, z_var, I32, block1).0; + let z_use1 = ssa.use_var(&mut func, z_var, I32, ssa_block1).0; assert_eq!(z_use1, z1_ssa); let z2_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x_use2, z_use1) }; - ssa.def_var(z_var, z2_ssa, block1); - assert_eq!(ssa.use_var(&mut func, z_var, I32, block1).0, z2_ssa); - ssa.seal_ebb_header_block(ebb0, &mut func); - let block2 = ssa.declare_ebb_header_block(ebb1); - ssa.declare_ebb_predecessor(ebb1, block0, jump_inst); - ssa.seal_ebb_header_block(ebb1, &mut func); - let x_use3 = ssa.use_var(&mut func, x_var, I32, block2).0; + ssa.def_var(z_var, z2_ssa, ssa_block1); + assert_eq!(ssa.use_var(&mut func, z_var, I32, ssa_block1).0, z2_ssa); + ssa.seal_block_header_block(block0, &mut func); + let ssa_block2 = ssa.declare_block_header_block(block1); + 
ssa.declare_block_predecessor(block1, ssa_block0, jump_inst); + ssa.seal_block_header_block(block1, &mut func); + let x_use3 = ssa.use_var(&mut func, x_var, I32, ssa_block2).0; assert_eq!(x_ssa, x_use3); - let y_use3 = ssa.use_var(&mut func, y_var, I32, block2).0; + let y_use3 = ssa.use_var(&mut func, y_var, I32, ssa_block2).0; assert_eq!(y_ssa, y_use3); let y2_ssa = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x_use3, y_use3) }; - ssa.def_var(y_var, y2_ssa, block2); + ssa.def_var(y_var, y2_ssa, ssa_block2); match func.dfg.analyze_branch(jump_inst) { BranchInfo::SingleDest(dest, jump_args) => { - assert_eq!(dest, ebb1); + assert_eq!(dest, block1); assert_eq!(jump_args.len(), 0); } _ => assert!(false), @@ -935,110 +944,110 @@ mod tests { fn program_with_loop() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: - // ebb0: + // block0: // x = 1; // y = 2; // z = x + y; - // jump ebb1 - // ebb1: + // jump block1 + // block1: // z = z + y; - // brnz y, ebb1; + // brnz y, block1; // z = z - x; // return y - // ebb2: + // block2: // y = y - x - // jump ebb1 + // jump block1 - let block0 = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); + let ssa_block0 = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); let x_var = Variable::new(0); let x1 = { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.insert_ebb(ebb2); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.insert_block(block2); + cur.goto_bottom(block0); cur.ins().iconst(I32, 1) }; - ssa.def_var(x_var, x1, block0); - assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x1); + ssa.def_var(x_var, x1, ssa_block0); + assert_eq!(ssa.use_var(&mut func, x_var, I32, ssa_block0).0, x1); let y_var = Variable::new(1); let y1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iconst(I32, 2) }; - ssa.def_var(y_var, y1, block0); - assert_eq!(ssa.use_var(&mut func, y_var, I32, block0).0, y1); + ssa.def_var(y_var, y1, ssa_block0); + assert_eq!(ssa.use_var(&mut func, y_var, I32, ssa_block0).0, y1); let z_var = Variable::new(2); - let x2 = ssa.use_var(&mut func, x_var, I32, block0).0; + let x2 = ssa.use_var(&mut func, x_var, I32, ssa_block0).0; assert_eq!(x2, x1); - let y2 = ssa.use_var(&mut func, y_var, I32, block0).0; + let y2 = ssa.use_var(&mut func, y_var, I32, ssa_block0).0; assert_eq!(y2, y1); let z1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iadd(x2, y2) }; - ssa.def_var(z_var, z1, block0); - let jump_ebb0_ebb1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); - cur.ins().jump(ebb1, &[]) + ssa.def_var(z_var, z1, ssa_block0); + let jump_block0_block1 = { + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); + cur.ins().jump(block1, &[]) }; - let block1 = ssa.declare_ebb_header_block(ebb1); - ssa.declare_ebb_predecessor(ebb1, block0, jump_ebb0_ebb1); - let z2 = ssa.use_var(&mut func, z_var, I32, block1).0; - let y3 = 
ssa.use_var(&mut func, y_var, I32, block1).0; + let ssa_block1 = ssa.declare_block_header_block(block1); + ssa.declare_block_predecessor(block1, ssa_block0, jump_block0_block1); + let z2 = ssa.use_var(&mut func, z_var, I32, ssa_block1).0; + let y3 = ssa.use_var(&mut func, y_var, I32, ssa_block1).0; let z3 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().iadd(z2, y3) }; - ssa.def_var(z_var, z3, block1); - let y4 = ssa.use_var(&mut func, y_var, I32, block1).0; + ssa.def_var(z_var, z3, ssa_block1); + let y4 = ssa.use_var(&mut func, y_var, I32, ssa_block1).0; assert_eq!(y4, y3); - let jump_ebb1_ebb2 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); - cur.ins().brnz(y4, ebb2, &[]) + let jump_block1_block2 = { + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); + cur.ins().brnz(y4, block2, &[]) }; - let block2 = ssa.declare_ebb_body_block(block1); - let z4 = ssa.use_var(&mut func, z_var, I32, block2).0; + let ssa_block2 = ssa.declare_block_body_block(ssa_block1); + let z4 = ssa.use_var(&mut func, z_var, I32, ssa_block2).0; assert_eq!(z4, z3); - let x3 = ssa.use_var(&mut func, x_var, I32, block2).0; + let x3 = ssa.use_var(&mut func, x_var, I32, ssa_block2).0; let z5 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().isub(z4, x3) }; - ssa.def_var(z_var, z5, block2); - let y5 = ssa.use_var(&mut func, y_var, I32, block2).0; + ssa.def_var(z_var, z5, ssa_block2); + let y5 = ssa.use_var(&mut func, y_var, I32, ssa_block2).0; assert_eq!(y5, y3); { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().return_(&[y5]) }; - let block3 = ssa.declare_ebb_header_block(ebb2); - ssa.declare_ebb_predecessor(ebb2, block1, jump_ebb1_ebb2); - ssa.seal_ebb_header_block(ebb2, &mut func); - let y6 = ssa.use_var(&mut func, y_var, I32, block3).0; + let ssa_block3 = ssa.declare_block_header_block(block2); + ssa.declare_block_predecessor(block2, ssa_block1, jump_block1_block2); + ssa.seal_block_header_block(block2, &mut func); + let y6 = ssa.use_var(&mut func, y_var, I32, ssa_block3).0; assert_eq!(y6, y3); - let x4 = ssa.use_var(&mut func, x_var, I32, block3).0; + let x4 = ssa.use_var(&mut func, x_var, I32, ssa_block3).0; assert_eq!(x4, x3); let y7 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb2); + let mut cur = FuncCursor::new(&mut func).at_bottom(block2); cur.ins().isub(y6, x4) }; - ssa.def_var(y_var, y7, block3); - let jump_ebb2_ebb1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb2); - cur.ins().jump(ebb1, &[]) + ssa.def_var(y_var, y7, ssa_block3); + let jump_block2_block1 = { + let mut cur = FuncCursor::new(&mut func).at_bottom(block2); + cur.ins().jump(block1, &[]) }; - ssa.declare_ebb_predecessor(ebb1, block3, jump_ebb2_ebb1); - ssa.seal_ebb_header_block(ebb1, &mut func); - assert_eq!(func.dfg.ebb_params(ebb1)[0], z2); - assert_eq!(func.dfg.ebb_params(ebb1)[1], y3); + ssa.declare_block_predecessor(block1, ssa_block3, jump_block2_block1); + ssa.seal_block_header_block(block1, &mut func); + assert_eq!(func.dfg.block_params(block1)[0], z2); + assert_eq!(func.dfg.block_params(block1)[1], y3); assert_eq!(func.dfg.resolve_aliases(x3), x1); } @@ -1049,14 +1058,14 @@ mod tests { // Here is the pseudo-program we want to translate: // // function %f { - // jt = jump_table [ebb2, ebb1] - // ebb0: + // jt = jump_table [block2, 
block1] + // block0: // x = 1; - // br_table x, ebb2, jt - // ebb1: + // br_table x, block2, jt + // block1: // x = 2 - // jump ebb2 - // ebb2: + // jump block2 + // block2: // x = x + 1 // return // } @@ -1064,70 +1073,70 @@ mod tests { let mut func = Function::new(); let mut ssa = SSABuilder::new(); let mut jump_table = JumpTableData::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); - // ebb0: + // block0: // x = 1; - let block0 = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); + let ssa_block0 = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); let x_var = Variable::new(0); let x1 = { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.insert_ebb(ebb2); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.insert_block(block2); + cur.goto_bottom(block0); cur.ins().iconst(I32, 1) }; - ssa.def_var(x_var, x1, block0); + ssa.def_var(x_var, x1, ssa_block0); - // jt = jump_table [ebb2, ebb1] - jump_table.push_entry(ebb2); - jump_table.push_entry(ebb1); + // jt = jump_table [block2, block1] + jump_table.push_entry(block2); + jump_table.push_entry(block1); let jt = func.create_jump_table(jump_table); - // ebb0: + // block0: // ... - // br_table x, ebb2, jt - ssa.use_var(&mut func, x_var, I32, block0).0; + // br_table x, block2, jt + ssa.use_var(&mut func, x_var, I32, ssa_block0).0; let br_table = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); - cur.ins().br_table(x1, ebb2, jt) + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); + cur.ins().br_table(x1, block2, jt) }; - // ebb1: + // block1: // x = 2 - // jump ebb2 - let block1 = ssa.declare_ebb_header_block(ebb1); - ssa.seal_ebb_header_block(ebb1, &mut func); + // jump block2 + let ssa_block1 = ssa.declare_block_header_block(block1); + ssa.seal_block_header_block(block1, &mut func); let x2 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().iconst(I32, 2) }; - ssa.def_var(x_var, x2, block1); + ssa.def_var(x_var, x2, ssa_block1); let jump_inst = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); - cur.ins().jump(ebb2, &[]) + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); + cur.ins().jump(block2, &[]) }; - // ebb2: + // block2: // x = x + 1 // return - let block3 = ssa.declare_ebb_header_block(ebb2); - ssa.declare_ebb_predecessor(ebb2, block1, jump_inst); - ssa.declare_ebb_predecessor(ebb2, block0, br_table); - ssa.seal_ebb_header_block(ebb2, &mut func); - let block4 = ssa.declare_ebb_body_block(block3); - let x3 = ssa.use_var(&mut func, x_var, I32, block4).0; + let ssa_block3 = ssa.declare_block_header_block(block2); + ssa.declare_block_predecessor(block2, ssa_block1, jump_inst); + ssa.declare_block_predecessor(block2, ssa_block0, br_table); + ssa.seal_block_header_block(block2, &mut func); + let ssa_block4 = ssa.declare_block_body_block(ssa_block3); + let x3 = ssa.use_var(&mut func, x_var, I32, ssa_block4).0; let x4 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb2); + let mut cur = FuncCursor::new(&mut func).at_bottom(block2); cur.ins().iadd_imm(x3, 1) }; - ssa.def_var(x_var, x4, block4); + ssa.def_var(x_var, x4, ssa_block4); { - let mut cur = FuncCursor::new(&mut 
func).at_bottom(ebb2); + let mut cur = FuncCursor::new(&mut func).at_bottom(block2); cur.ins().return_(&[]) }; @@ -1147,75 +1156,75 @@ mod tests { fn undef_values_reordering() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: - // ebb0: + // block0: // x = 0 // y = 1 // z = 2 - // jump ebb1 - // ebb1: + // jump block1 + // block1: // x = z + x // y = y - x - // jump ebb1 + // jump block1 // - let block0 = ssa.declare_ebb_header_block(ebb0); + let ssa_block0 = ssa.declare_block_header_block(block0); let x_var = Variable::new(0); let y_var = Variable::new(1); let z_var = Variable::new(2); - ssa.seal_ebb_header_block(ebb0, &mut func); + ssa.seal_block_header_block(block0, &mut func); let x1 = { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.goto_bottom(block0); cur.ins().iconst(I32, 0) }; - ssa.def_var(x_var, x1, block0); + ssa.def_var(x_var, x1, ssa_block0); let y1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iconst(I32, 1) }; - ssa.def_var(y_var, y1, block0); + ssa.def_var(y_var, y1, ssa_block0); let z1 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); cur.ins().iconst(I32, 2) }; - ssa.def_var(z_var, z1, block0); + ssa.def_var(z_var, z1, ssa_block0); let jump_inst = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb0); - cur.ins().jump(ebb1, &[]) + let mut cur = FuncCursor::new(&mut func).at_bottom(block0); + cur.ins().jump(block1, &[]) }; - let block1 = ssa.declare_ebb_header_block(ebb1); - ssa.declare_ebb_predecessor(ebb1, block0, jump_inst); - let z2 = ssa.use_var(&mut func, z_var, I32, block1).0; - assert_eq!(func.dfg.ebb_params(ebb1)[0], z2); - let x2 = ssa.use_var(&mut func, x_var, I32, block1).0; - assert_eq!(func.dfg.ebb_params(ebb1)[1], x2); + let ssa_block1 = ssa.declare_block_header_block(block1); + ssa.declare_block_predecessor(block1, ssa_block0, jump_inst); + let z2 = ssa.use_var(&mut func, z_var, I32, ssa_block1).0; + assert_eq!(func.dfg.block_params(block1)[0], z2); + let x2 = ssa.use_var(&mut func, x_var, I32, ssa_block1).0; + assert_eq!(func.dfg.block_params(block1)[1], x2); let x3 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().iadd(x2, z2) }; - ssa.def_var(x_var, x3, block1); - let x4 = ssa.use_var(&mut func, x_var, I32, block1).0; - let y3 = ssa.use_var(&mut func, y_var, I32, block1).0; - assert_eq!(func.dfg.ebb_params(ebb1)[2], y3); + ssa.def_var(x_var, x3, ssa_block1); + let x4 = ssa.use_var(&mut func, x_var, I32, ssa_block1).0; + let y3 = ssa.use_var(&mut func, y_var, I32, ssa_block1).0; + assert_eq!(func.dfg.block_params(block1)[2], y3); let y4 = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); cur.ins().isub(y3, x4) }; - ssa.def_var(y_var, y4, block1); + ssa.def_var(y_var, y4, ssa_block1); let jump_inst = { - let mut cur = FuncCursor::new(&mut func).at_bottom(ebb1); - cur.ins().jump(ebb1, &[]) + let mut cur = FuncCursor::new(&mut func).at_bottom(block1); + cur.ins().jump(block1, &[]) }; - 
ssa.declare_ebb_predecessor(ebb1, block1, jump_inst); - ssa.seal_ebb_header_block(ebb1, &mut func); + ssa.declare_block_predecessor(block1, ssa_block1, jump_inst); + ssa.seal_block_header_block(block1, &mut func); // At sealing the "z" argument disappears but the remaining "x" and "y" args have to be // in the right order. - assert_eq!(func.dfg.ebb_params(ebb1)[1], y3); - assert_eq!(func.dfg.ebb_params(ebb1)[0], x2); + assert_eq!(func.dfg.block_params(block1)[1], y3); + assert_eq!(func.dfg.block_params(block1)[0], x2); } #[test] @@ -1223,20 +1232,20 @@ mod tests { // Use vars of various types which have not been defined. let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let block = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); + let block0 = func.dfg.make_block(); + let ssa_block = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); let i32_var = Variable::new(0); let f32_var = Variable::new(1); let f64_var = Variable::new(2); let b1_var = Variable::new(3); let f32x4_var = Variable::new(4); - ssa.use_var(&mut func, i32_var, I32, block); - ssa.use_var(&mut func, f32_var, F32, block); - ssa.use_var(&mut func, f64_var, F64, block); - ssa.use_var(&mut func, b1_var, B1, block); - ssa.use_var(&mut func, f32x4_var, F32X4, block); - assert_eq!(func.dfg.num_ebb_params(ebb0), 0); + ssa.use_var(&mut func, i32_var, I32, ssa_block); + ssa.use_var(&mut func, f32_var, F32, ssa_block); + ssa.use_var(&mut func, f64_var, F64, ssa_block); + ssa.use_var(&mut func, b1_var, B1, ssa_block); + ssa.use_var(&mut func, f32x4_var, F32X4, ssa_block); + assert_eq!(func.dfg.num_block_params(block0), 0); } #[test] @@ -1245,15 +1254,15 @@ mod tests { // top of the entry block, and then fall back to inserting an iconst. let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let block = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); + let block0 = func.dfg.make_block(); + let ssa_block = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); let x_var = Variable::new(0); - assert_eq!(func.dfg.num_ebb_params(ebb0), 0); - ssa.use_var(&mut func, x_var, I32, block); - assert_eq!(func.dfg.num_ebb_params(ebb0), 0); + assert_eq!(func.dfg.num_block_params(block0), 0); + ssa.use_var(&mut func, x_var, I32, ssa_block); + assert_eq!(func.dfg.num_block_params(block0), 0); assert_eq!( - func.dfg[func.layout.first_inst(ebb0).unwrap()].opcode(), + func.dfg[func.layout.first_inst(block0).unwrap()].opcode(), Opcode::Iconst ); } @@ -1262,19 +1271,19 @@ mod tests { fn undef_in_entry_sealed_after() { // Use a var which has not been defined, but the block is not sealed - // until afterward. Before sealing, the SSA builder should insert an - // ebb param; after sealing, it should be removed. + // until afterward. Before sealing, the SSA builder should insert a + // block param; after sealing, it should be removed.
let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let block = ssa.declare_ebb_header_block(ebb0); + let block0 = func.dfg.make_block(); + let ssa_block = ssa.declare_block_header_block(block0); let x_var = Variable::new(0); - assert_eq!(func.dfg.num_ebb_params(ebb0), 0); - ssa.use_var(&mut func, x_var, I32, block); - assert_eq!(func.dfg.num_ebb_params(ebb0), 1); - ssa.seal_ebb_header_block(ebb0, &mut func); - assert_eq!(func.dfg.num_ebb_params(ebb0), 0); + assert_eq!(func.dfg.num_block_params(block0), 0); + ssa.use_var(&mut func, x_var, I32, ssa_block); + assert_eq!(func.dfg.num_block_params(block0), 1); + ssa.seal_block_header_block(block0, &mut func); + assert_eq!(func.dfg.num_block_params(block0), 0); assert_eq!( - func.dfg[func.layout.first_inst(ebb0).unwrap()].opcode(), + func.dfg[func.layout.first_inst(block0).unwrap()].opcode(), Opcode::Iconst ); } @@ -1283,33 +1292,33 @@ mod tests { fn unreachable_use() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: - // ebb0: + // block0: // return - // ebb1: - // brz v1, ebb1 - // jump ebb1 - let _block0 = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); - let block1 = ssa.declare_ebb_header_block(ebb1); - let block2 = ssa.declare_ebb_body_block(block1); + // block1: + // brz v1, block1 + // jump block1 + let _ssa_block0 = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); + let ssa_block1 = ssa.declare_block_header_block(block1); + let ssa_block2 = ssa.declare_block_body_block(ssa_block1); { let mut cur = FuncCursor::new(&mut func); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.goto_bottom(block0); cur.ins().return_(&[]); let x_var = Variable::new(0); - cur.goto_bottom(ebb1); - let val = ssa.use_var(&mut cur.func, x_var, I32, block1).0; - let brz = cur.ins().brz(val, ebb1, &[]); - ssa.declare_ebb_predecessor(ebb1, block1, brz); - let j = cur.ins().jump(ebb1, &[]); - ssa.declare_ebb_predecessor(ebb1, block2, j); + cur.goto_bottom(block1); + let val = ssa.use_var(&mut cur.func, x_var, I32, ssa_block1).0; + let brz = cur.ins().brz(val, block1, &[]); + ssa.declare_block_predecessor(block1, ssa_block1, brz); + let j = cur.ins().jump(block1, &[]); + ssa.declare_block_predecessor(block1, ssa_block2, j); } - ssa.seal_ebb_header_block(ebb1, &mut func); + ssa.seal_block_header_block(block1, &mut func); let flags = settings::Flags::new(settings::builder()); match verify_function(&func, &flags) { Ok(()) => {} @@ -1326,41 +1335,41 @@ mod tests { fn unreachable_use_with_multiple_preds() { let mut func = Function::new(); let mut ssa = SSABuilder::new(); - let ebb0 = func.dfg.make_ebb(); - let ebb1 = func.dfg.make_ebb(); - let ebb2 = func.dfg.make_ebb(); + let block0 = func.dfg.make_block(); + let block1 = func.dfg.make_block(); + let block2 = func.dfg.make_block(); // Here is the pseudo-program we want to translate: - // ebb0: + // block0: // return - // ebb1: - // brz v1, ebb2 - // jump ebb1 - // ebb2: - // jump ebb1 - let _block0 = ssa.declare_ebb_header_block(ebb0); - ssa.seal_ebb_header_block(ebb0, &mut func); - let block1 = ssa.declare_ebb_header_block(ebb1); - let block2 = ssa.declare_ebb_header_block(ebb2); + // block1: + // brz v1, 
block2 + // jump block1 + // block2: + // jump block1 + let _ssa_block0 = ssa.declare_block_header_block(block0); + ssa.seal_block_header_block(block0, &mut func); + let ssa_block1 = ssa.declare_block_header_block(block1); + let ssa_block2 = ssa.declare_block_header_block(block2); { let mut cur = FuncCursor::new(&mut func); let x_var = Variable::new(0); - cur.insert_ebb(ebb0); - cur.insert_ebb(ebb1); - cur.insert_ebb(ebb2); - cur.goto_bottom(ebb0); + cur.insert_block(block0); + cur.insert_block(block1); + cur.insert_block(block2); + cur.goto_bottom(block0); cur.ins().return_(&[]); - cur.goto_bottom(ebb1); - let v = ssa.use_var(&mut cur.func, x_var, I32, block1).0; - let brz = cur.ins().brz(v, ebb2, &[]); - let j0 = cur.ins().jump(ebb1, &[]); - cur.goto_bottom(ebb2); - let j1 = cur.ins().jump(ebb1, &[]); - ssa.declare_ebb_predecessor(ebb1, block2, brz); - ssa.declare_ebb_predecessor(ebb1, block1, j0); - ssa.declare_ebb_predecessor(ebb2, block1, j1); + cur.goto_bottom(block1); + let v = ssa.use_var(&mut cur.func, x_var, I32, ssa_block1).0; + let brz = cur.ins().brz(v, block2, &[]); + let j0 = cur.ins().jump(block1, &[]); + cur.goto_bottom(block2); + let j1 = cur.ins().jump(block1, &[]); + ssa.declare_block_predecessor(block1, ssa_block2, brz); + ssa.declare_block_predecessor(block1, ssa_block1, j0); + ssa.declare_block_predecessor(block2, ssa_block1, j1); } - ssa.seal_ebb_header_block(ebb1, &mut func); - ssa.seal_ebb_header_block(ebb2, &mut func); + ssa.seal_block_header_block(block1, &mut func); + ssa.seal_block_header_block(block2, &mut func); let flags = settings::Flags::new(settings::builder()); match verify_function(&func, &flags) { Ok(()) => {} diff --git a/cranelift/frontend/src/switch.rs b/cranelift/frontend/src/switch.rs index e4c147d16b..f444d9aacc 100644 --- a/cranelift/frontend/src/switch.rs +++ b/cranelift/frontend/src/switch.rs @@ -23,13 +23,13 @@ type EntryIndex = u64; /// # let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); /// # let mut builder = FunctionBuilder::new(&mut func, &mut fn_builder_ctx); /// # -/// # let entry = builder.create_ebb(); +/// # let entry = builder.create_block(); /// # builder.switch_to_block(entry); /// # -/// let block0 = builder.create_ebb(); -/// let block1 = builder.create_ebb(); -/// let block2 = builder.create_ebb(); -/// let fallback = builder.create_ebb(); +/// let block0 = builder.create_block(); +/// let block1 = builder.create_block(); +/// let block2 = builder.create_block(); +/// let fallback = builder.create_block(); /// /// let val = builder.ins().iconst(I32, 1); /// @@ -41,7 +41,7 @@ type EntryIndex = u64; /// ``` #[derive(Debug, Default)] pub struct Switch { - cases: HashMap<EntryIndex, Ebb>, + cases: HashMap<EntryIndex, Block>, } impl Switch { @@ -53,8 +53,8 @@ impl Switch { } /// Set a switch entry - pub fn set_entry(&mut self, index: EntryIndex, ebb: Ebb) { - let prev = self.cases.insert(index, ebb); + pub fn set_entry(&mut self, index: EntryIndex, block: Block) { + let prev = self.cases.insert(index, block); assert!( prev.is_none(), "Tried to set the same entry {} twice", @@ -63,7 +63,7 @@ impl Switch { } /// Get a reference to all existing entries - pub fn entries(&self) -> &HashMap<EntryIndex, Ebb> { + pub fn entries(&self) -> &HashMap<EntryIndex, Block> { &self.cases } @@ -82,7 +82,7 @@ impl Switch { let mut contiguous_case_ranges: Vec<ContiguousCaseRange> = vec![]; let mut last_index = None; - for (index, ebb) in cases { + for (index, block) in cases { match last_index { None => contiguous_case_ranges.push(ContiguousCaseRange::new(index)), Some(last_index) => { @@ -91,7 +91,11 @@ impl
Switch { } } } - contiguous_case_ranges.last_mut().unwrap().ebbs.push(ebb); + contiguous_case_ranges + .last_mut() + .unwrap() + .blocks + .push(block); last_index = Some(index); } @@ -107,10 +111,10 @@ impl Switch { fn build_search_tree( bx: &mut FunctionBuilder, val: Value, - otherwise: Ebb, + otherwise: Block, contiguous_case_ranges: Vec<ContiguousCaseRange>, - ) -> Vec<(EntryIndex, Ebb, Vec<Ebb>)> { - let mut cases_and_jt_ebbs = Vec::new(); + ) -> Vec<(EntryIndex, Block, Vec<Block>)> { + let mut cases_and_jt_blocks = Vec::new(); // Avoid allocation in the common case if contiguous_case_ranges.len() <= 3 { @@ -119,17 +123,17 @@ impl Switch { val, otherwise, contiguous_case_ranges, - &mut cases_and_jt_ebbs, + &mut cases_and_jt_blocks, ); - return cases_and_jt_ebbs; + return cases_and_jt_blocks; } - let mut stack: Vec<(Option<Ebb>, Vec<ContiguousCaseRange>)> = Vec::new(); + let mut stack: Vec<(Option<Block>, Vec<ContiguousCaseRange>)> = Vec::new(); stack.push((None, contiguous_case_ranges)); - while let Some((ebb, contiguous_case_ranges)) = stack.pop() { - if let Some(ebb) = ebb { - bx.switch_to_block(ebb); + while let Some((block, contiguous_case_ranges)) = stack.pop() { + if let Some(block) = block { + bx.switch_to_block(block); } if contiguous_case_ranges.len() <= 3 { @@ -138,64 +142,68 @@ impl Switch { val, otherwise, contiguous_case_ranges, - &mut cases_and_jt_ebbs, + &mut cases_and_jt_blocks, ); } else { let split_point = contiguous_case_ranges.len() / 2; let mut left = contiguous_case_ranges; let right = left.split_off(split_point); - let left_ebb = bx.create_ebb(); - let right_ebb = bx.create_ebb(); + let left_block = bx.create_block(); + let right_block = bx.create_block(); let should_take_right_side = bx.ins().icmp_imm( IntCC::UnsignedGreaterThanOrEqual, val, right[0].first_index as i64, ); - bx.ins().brnz(should_take_right_side, right_ebb, &[]); - bx.ins().jump(left_ebb, &[]); + bx.ins().brnz(should_take_right_side, right_block, &[]); + bx.ins().jump(left_block, &[]); - stack.push((Some(left_ebb), left)); - stack.push((Some(right_ebb), right)); + stack.push((Some(left_block), left)); + stack.push((Some(right_block), right)); } } - cases_and_jt_ebbs + cases_and_jt_blocks } /// Linear search for the right `ContiguousCaseRange`.
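
[Editorial aside, illustration only, not part of the patch] The divide step above is the core of the binary search: the ranges arrive sorted by `first_index`, are split at the midpoint, and `val` is compared against the first index of the right half. Isolated over plain indices (hypothetical helper, not part of Switch's API):

fn split_ranges(mut first_indices: Vec<u64>) -> (Vec<u64>, Vec<u64>, u64) {
    // Callers only reach this path with more than three ranges, so the
    // right half is never empty.
    let split_point = first_indices.len() / 2;
    let right = first_indices.split_off(split_point);
    // `val >= pivot` selects the right half, otherwise the left half.
    let pivot = right[0];
    (first_indices, right, pivot)
}

For the test below with entries {0, 1, 5, 7, 10, 11, 12}, the range starts are [0, 5, 7, 10]; splitting yields left [0, 5], right [7, 10], and pivot 7, matching the `v2 = icmp_imm uge v1, 7` in the expected output.
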
fn build_search_branches( bx: &mut FunctionBuilder, val: Value, - otherwise: Ebb, + otherwise: Block, contiguous_case_ranges: Vec<ContiguousCaseRange>, - cases_and_jt_ebbs: &mut Vec<(EntryIndex, Ebb, Vec<Ebb>)>, + cases_and_jt_blocks: &mut Vec<(EntryIndex, Block, Vec<Block>)>, ) { let mut was_branch = false; let ins_fallthrough_jump = |was_branch: bool, bx: &mut FunctionBuilder| { if was_branch { - let ebb = bx.create_ebb(); - bx.ins().jump(ebb, &[]); - bx.switch_to_block(ebb); + let block = bx.create_block(); + bx.ins().jump(block, &[]); + bx.switch_to_block(block); } }; - for ContiguousCaseRange { first_index, ebbs } in contiguous_case_ranges.into_iter().rev() { - match (ebbs.len(), first_index) { + for ContiguousCaseRange { + first_index, + blocks, + } in contiguous_case_ranges.into_iter().rev() + { + match (blocks.len(), first_index) { (1, 0) => { ins_fallthrough_jump(was_branch, bx); - bx.ins().brz(val, ebbs[0], &[]); + bx.ins().brz(val, blocks[0], &[]); } (1, _) => { ins_fallthrough_jump(was_branch, bx); let is_good_val = bx.ins().icmp_imm(IntCC::Equal, val, first_index as i64); - bx.ins().brnz(is_good_val, ebbs[0], &[]); + bx.ins().brnz(is_good_val, blocks[0], &[]); } (_, 0) => { // if `first_index` is 0, then `icmp_imm uge val, first_index` is trivially true - let jt_ebb = bx.create_ebb(); - bx.ins().jump(jt_ebb, &[]); - cases_and_jt_ebbs.push((first_index, jt_ebb, ebbs)); + let jt_block = bx.create_block(); + bx.ins().jump(jt_block, &[]); + cases_and_jt_blocks.push((first_index, jt_block, blocks)); // `jump otherwise` below must not be hit, because the current block has been // filled above. This is the last iteration anyway, as 0 is the smallest // unsigned int, so just return here. @@ -203,14 +211,14 @@ impl Switch { } (_, _) => { ins_fallthrough_jump(was_branch, bx); - let jt_ebb = bx.create_ebb(); + let jt_block = bx.create_block(); let is_good_val = bx.ins().icmp_imm( IntCC::UnsignedGreaterThanOrEqual, val, first_index as i64, ); - bx.ins().brnz(is_good_val, jt_ebb, &[]); - cases_and_jt_ebbs.push((first_index, jt_ebb, ebbs)); + bx.ins().brnz(is_good_val, jt_block, &[]); + cases_and_jt_blocks.push((first_index, jt_block, blocks)); } } was_branch = true; @@ -219,21 +227,21 @@ impl Switch { bx.ins().jump(otherwise, &[]); } - /// For every item in `cases_and_jt_ebbs` this will create a jump table in the specified ebb. + /// For every item in `cases_and_jt_blocks` this will create a jump table in the specified block.
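
[Editorial aside, illustration only, not part of the patch] For each deferred `(first_index, jt_block, blocks)` triple, `build_jump_tables` in the next hunk fills the dedicated block with a table dispatch, rebasing the discriminant so that `first_index` maps to table slot 0. A sketch of that per-entry emission using the same FunctionBuilder calls as this file; the free-function packaging is hypothetical:

fn emit_one_jump_table(
    bx: &mut FunctionBuilder,
    val: Value,
    otherwise: Block,
    first_index: u64,
    jt_block: Block,
    blocks: &[Block],
) {
    // Collect the destinations, already in ascending entry-index order.
    let mut jt_data = JumpTableData::new();
    for &block in blocks {
        jt_data.push_entry(block);
    }
    let jump_table = bx.create_jump_table(jt_data);

    // Fill the dedicated jump-table block.
    bx.switch_to_block(jt_block);

    // Rebase the discriminant unless the range already starts at 0
    // (assuming first_index fits in i64, as the surrounding code does).
    let discr = if first_index == 0 {
        val
    } else {
        bx.ins().iadd_imm(val, -(first_index as i64))
    };
    bx.ins().br_table(discr, otherwise, jump_table);
}
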
fn build_jump_tables( bx: &mut FunctionBuilder, val: Value, - otherwise: Ebb, - cases_and_jt_ebbs: Vec<(EntryIndex, Ebb, Vec<Ebb>)>, + otherwise: Block, + cases_and_jt_blocks: Vec<(EntryIndex, Block, Vec<Block>)>, ) { - for (first_index, jt_ebb, ebbs) in cases_and_jt_ebbs.into_iter().rev() { + for (first_index, jt_block, blocks) in cases_and_jt_blocks.into_iter().rev() { let mut jt_data = JumpTableData::new(); - for ebb in ebbs { - jt_data.push_entry(ebb); + for block in blocks { + jt_data.push_entry(block); } let jump_table = bx.create_jump_table(jt_data); - bx.switch_to_block(jt_ebb); + bx.switch_to_block(jt_block); let discr = if first_index == 0 { val } else { @@ -249,8 +257,8 @@ impl Switch { /// /// * The function builder to emit to /// * The value to switch on - /// * The default ebb - pub fn emit(self, bx: &mut FunctionBuilder, val: Value, otherwise: Ebb) { + /// * The default block + pub fn emit(self, bx: &mut FunctionBuilder, val: Value, otherwise: Block) { // FIXME icmp(_imm) doesn't have encodings for i8 and i16 on x86(_64) yet let val = match bx.func.dfg.value_type(val) { types::I8 | types::I16 => bx.ins().uextend(types::I32, val), @@ -258,19 +266,20 @@ impl Switch { }; let contiguous_case_ranges = self.collect_contiguous_case_ranges(); - let cases_and_jt_ebbs = Self::build_search_tree(bx, val, otherwise, contiguous_case_ranges); - Self::build_jump_tables(bx, val, otherwise, cases_and_jt_ebbs); + let cases_and_jt_blocks = + Self::build_search_tree(bx, val, otherwise, contiguous_case_ranges); + Self::build_jump_tables(bx, val, otherwise, cases_and_jt_blocks); } } /// This represents a contiguous range of cases to switch on. /// -/// For example 10 => ebb1, 11 => ebb2, 12 => ebb7 will be represented as: +/// For example 10 => block1, 11 => block2, 12 => block7 will be represented as: /// /// ```plain /// ContiguousCaseRange { /// first_index: 10, -/// ebbs: vec![Ebb::from_u32(1), Ebb::from_u32(2), Ebb::from_u32(7)] +/// blocks: vec![Block::from_u32(1), Block::from_u32(2), Block::from_u32(7)] /// } /// ``` #[derive(Debug)] @@ -278,15 +287,15 @@ struct ContiguousCaseRange { /// The entry index of the first case. E.g. 10 when the entry indexes are 10, 11, 12 and 13. first_index: EntryIndex, - /// The ebbs to jump to sorted in ascending order of entry index. - ebbs: Vec<Ebb>, + /// The blocks to jump to, sorted in ascending order of entry index.
+ blocks: Vec<Block>, } impl ContiguousCaseRange { fn new(first_index: EntryIndex) -> Self { Self { first_index, - ebbs: Vec::new(), + blocks: Vec::new(), } } } @@ -304,15 +313,15 @@ mod tests { let mut func_ctx = FunctionBuilderContext::new(); { let mut bx = FunctionBuilder::new(&mut func, &mut func_ctx); - let ebb = bx.create_ebb(); - bx.switch_to_block(ebb); + let block = bx.create_block(); + bx.switch_to_block(block); let val = bx.ins().iconst(types::I8, 0); let mut switch = Switch::new(); $( - let ebb = bx.create_ebb(); - switch.set_entry($index, ebb); + let block = bx.create_block(); + switch.set_entry($index, block); )* - switch.emit(&mut bx, val, Ebb::with_number($default).unwrap()); + switch.emit(&mut bx, val, Block::with_number($default).unwrap()); } func .to_string() @@ -327,11 +336,11 @@ mod tests { let func = setup!(0, [0,]); assert_eq!( func, - "ebb0: + "block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 - brz v1, ebb1 - jump ebb0" + brz v1, block1 + jump block0" ); } @@ -340,12 +349,12 @@ mod tests { let func = setup!(0, [1,]); assert_eq!( func, - "ebb0: + "block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm eq v1, 1 - brnz v2, ebb1 - jump ebb0" + brnz v2, block1 + jump block0" ); } @@ -354,15 +363,15 @@ mod tests { let func = setup!(0, [0, 1,]); assert_eq!( func, - " jt0 = jump_table [ebb1, ebb2] + " jt0 = jump_table [block1, block2] -ebb0: +block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 - jump ebb3 + jump block3 -ebb3: - br_table.i32 v1, ebb0, jt0" +block3: + br_table.i32 v1, block0, jt0" ); } @@ -371,16 +380,16 @@ ebb3: let func = setup!(0, [0, 2,]); assert_eq!( func, - "ebb0: + "block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm eq v1, 2 - brnz v2, ebb2 - jump ebb3 + brnz v2, block2 + jump block3 -ebb3: - brz.i32 v1, ebb1 - jump ebb0" +block3: + brz.i32 v1, block1 + jump block0" ); } @@ -389,37 +398,37 @@ ebb3: let func = setup!(0, [0, 1, 5, 7, 10, 11, 12,]); assert_eq!( func, - " jt0 = jump_table [ebb1, ebb2] - jt1 = jump_table [ebb5, ebb6, ebb7] + " jt0 = jump_table [block1, block2] + jt1 = jump_table [block5, block6, block7] -ebb0: +block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm uge v1, 7 - brnz v2, ebb9 - jump ebb8 + brnz v2, block9 + jump block8 -ebb9: +block9: v3 = icmp_imm.i32 uge v1, 10 - brnz v3, ebb10 - jump ebb11 + brnz v3, block10 + jump block11 -ebb11: +block11: v4 = icmp_imm.i32 eq v1, 7 - brnz v4, ebb4 - jump ebb0 + brnz v4, block4 + jump block0 -ebb8: +block8: v5 = icmp_imm.i32 eq v1, 5 - brnz v5, ebb3 - jump ebb12 + brnz v5, block3 + jump block12 -ebb12: - br_table.i32 v1, ebb0, jt0 +block12: + br_table.i32 v1, block0, jt0 -ebb10: +block10: v6 = iadd_imm.i32 v1, -10 - br_table v6, ebb0, jt1" + br_table v6, block0, jt1" ); } @@ -428,17 +437,17 @@ ebb10: let func = setup!(0, [::core::i64::MIN as u64, 1,]); assert_eq!( func, - "ebb0: + "block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm eq v1, 0x8000_0000_0000_0000 - brnz v2, ebb1 - jump ebb3 + brnz v2, block1 + jump block3 -ebb3: +block3: v3 = icmp_imm.i32 eq v1, 1 - brnz v3, ebb2 - jump ebb0" + brnz v3, block2 + jump block0" ); } @@ -447,17 +456,17 @@ ebb3: let func = setup!(0, [::core::i64::MAX as u64, 1,]); assert_eq!( func, - "ebb0: + "block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm eq v1, 0x7fff_ffff_ffff_ffff - brnz v2, ebb1 - jump ebb3 + brnz v2, block1 + jump block3 -ebb3: +block3: v3 = icmp_imm.i32 eq v1, 1 - brnz v3, ebb2 - jump ebb0" + brnz v3, block2 + jump block0" ) } @@ -466,17 +475,17 @@ ebb3: let func = setup!(0, [-1i64 as u64, 0, 1,]); assert_eq!( func, -
jt0 = jump_table [ebb2, ebb3] + " jt0 = jump_table [block2, block3] -ebb0: +block0: v0 = iconst.i8 0 v1 = uextend.i32 v0 v2 = icmp_imm eq v1, -1 - brnz v2, ebb1 - jump ebb4 + brnz v2, block1 + jump block4 -ebb4: - br_table.i32 v1, ebb0, jt0" +block4: + br_table.i32 v1, block0, jt0" ); } } diff --git a/cranelift/object/src/backend.rs b/cranelift/object/src/backend.rs index 28874cb521..d6a2d98385 100644 --- a/cranelift/object/src/backend.rs +++ b/cranelift/object/src/backend.rs @@ -525,7 +525,7 @@ struct ObjectRelocSink { } impl RelocSink for ObjectRelocSink { - fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { + fn reloc_block(&mut self, _offset: CodeOffset, _reloc: Reloc, _block_offset: CodeOffset) { unimplemented!(); } diff --git a/cranelift/preopt/src/constant_folding.rs b/cranelift/preopt/src/constant_folding.rs index b87db2d98f..40d597eddc 100644 --- a/cranelift/preopt/src/constant_folding.rs +++ b/cranelift/preopt/src/constant_folding.rs @@ -44,7 +44,7 @@ impl ConstImm { pub fn fold_constants(func: &mut ir::Function) { let mut pos = FuncCursor::new(func); - while let Some(_ebb) = pos.next_ebb() { + while let Some(_block) = pos.next_block() { while let Some(inst) = pos.next_inst() { use self::ir::InstructionData::*; match pos.func.dfg[inst] { @@ -225,7 +225,7 @@ fn fold_unary(dfg: &mut ir::DataFlowGraph, inst: ir::Inst, opcode: ir::Opcode, a } fn fold_branch(pos: &mut FuncCursor, inst: ir::Inst, opcode: ir::Opcode) { - let (cond, ebb, args) = { + let (cond, block, args) = { let values = pos.func.dfg.inst_args(inst); let inst_data = &pos.func.dfg[inst]; ( @@ -246,8 +246,8 @@ fn fold_branch(pos: &mut FuncCursor, inst: ir::Inst, opcode: ir::Opcode) { }; if (branch_if_zero && !truthiness) || (!branch_if_zero && truthiness) { - pos.func.dfg.replace(inst).jump(ebb, &args); - // remove the rest of the ebb to avoid verifier errors + pos.func.dfg.replace(inst).jump(block, &args); + // remove the rest of the block to avoid verifier errors while let Some(next_inst) = pos.func.layout.next_inst(inst) { pos.func.layout.remove_inst(next_inst); } diff --git a/cranelift/reader/src/lexer.rs b/cranelift/reader/src/lexer.rs index 9838227e86..1d2908a92f 100644 --- a/cranelift/reader/src/lexer.rs +++ b/cranelift/reader/src/lexer.rs @@ -2,7 +2,7 @@ use crate::error::Location; use cranelift_codegen::ir::types; -use cranelift_codegen::ir::{Ebb, Value}; +use cranelift_codegen::ir::{Block, Value}; #[allow(unused_imports, deprecated)] use std::ascii::AsciiExt; use std::str::CharIndices; @@ -33,7 +33,7 @@ pub enum Token<'a> { Integer(&'a str), // Integer immediate Type(types::Type), // i32, f32, b32x4, ... Value(Value), // v12, v7 - Ebb(Ebb), // ebb3 + Block(Block), // block3 StackSlot(u32), // ss3 GlobalValue(u32), // gv3 Heap(u32), // heap2 @@ -318,7 +318,7 @@ impl<'a> Lexer<'a> { } let text = &self.source[begin..self.pos]; - // Look for numbered well-known entities like ebb15, v45, ... + // Look for numbered well-known entities like block15, v45, ... 
 token(
 split_entity_name(text)
 .and_then(|(prefix, number)| {
@@ -339,7 +339,7 @@
 fn numbered_entity(prefix: &str, number: u32) -> Option<Token<'a>> {
 match prefix {
 "v" => Value::with_number(number).map(Token::Value),
- "ebb" => Ebb::with_number(number).map(Token::Ebb),
+ "block" => Block::with_number(number).map(Token::Block),
 "ss" => Some(Token::StackSlot(number)),
 "gv" => Some(Token::GlobalValue(number)),
 "heap" => Some(Token::Heap(number)),
@@ -519,7 +519,7 @@ mod tests {
 use super::*;
 use crate::error::Location;
 use cranelift_codegen::ir::types;
- use cranelift_codegen::ir::{Ebb, Value};
+ use cranelift_codegen::ir::{Block, Value};

 #[test]
 fn digits() {
@@ -616,7 +616,7 @@
 #[test]
 fn lex_identifiers() {
 let mut lex = Lexer::new(
- "v0 v00 vx01 ebb1234567890 ebb5234567890 v1x vx1 vxvx4 \
+ "v0 v00 vx01 block1234567890 block5234567890 v1x vx1 vxvx4 \
 function0 function b1 i32x4 f32x5 \
 iflags fflags iflagss",
 );
@@ -628,9 +628,9 @@
 assert_eq!(lex.next(), token(Token::Identifier("vx01"), 1));
 assert_eq!(
 lex.next(),
- token(Token::Ebb(Ebb::with_number(1234567890).unwrap()), 1)
+ token(Token::Block(Block::with_number(1234567890).unwrap()), 1)
 );
- assert_eq!(lex.next(), token(Token::Identifier("ebb5234567890"), 1));
+ assert_eq!(lex.next(), token(Token::Identifier("block5234567890"), 1));
 assert_eq!(lex.next(), token(Token::Identifier("v1x"), 1));
 assert_eq!(lex.next(), token(Token::Identifier("vx1"), 1));
 assert_eq!(lex.next(), token(Token::Identifier("vxvx4"), 1));
@@ -656,7 +656,7 @@

 #[test]
 fn lex_names() {
- let mut lex = Lexer::new("%0 %x3 %function %123_abc %ss0 %v3 %ebb11 %_");
+ let mut lex = Lexer::new("%0 %x3 %function %123_abc %ss0 %v3 %block11 %_");

 assert_eq!(lex.next(), token(Token::Name("0"), 1));
 assert_eq!(lex.next(), token(Token::Name("x3"), 1));
@@ -664,7 +664,7 @@
 assert_eq!(lex.next(), token(Token::Name("123_abc"), 1));
 assert_eq!(lex.next(), token(Token::Name("ss0"), 1));
 assert_eq!(lex.next(), token(Token::Name("v3"), 1));
- assert_eq!(lex.next(), token(Token::Name("ebb11"), 1));
+ assert_eq!(lex.next(), token(Token::Name("block11"), 1));
 assert_eq!(lex.next(), token(Token::Name("_"), 1));
 }

diff --git a/cranelift/reader/src/parser.rs b/cranelift/reader/src/parser.rs
index 8b603c7870..f0b866f472 100644
--- a/cranelift/reader/src/parser.rs
+++ b/cranelift/reader/src/parser.rs
@@ -14,7 +14,7 @@ use cranelift_codegen::ir::instructions::{InstructionData, InstructionFormat, Va
 use cranelift_codegen::ir::types::INVALID;
 use cranelift_codegen::ir::types::*;
 use cranelift_codegen::ir::{
- AbiParam, ArgumentExtension, ArgumentLoc, ConstantData, Ebb, ExtFuncData, ExternalName,
+ AbiParam, ArgumentExtension, ArgumentLoc, Block, ConstantData, ExtFuncData, ExternalName,
 FuncRef, Function, GlobalValue, GlobalValueData, Heap, HeapData, HeapStyle, JumpTable,
 JumpTableData, MemFlags, Opcode, SigRef, Signature, StackSlot, StackSlotData, StackSlotKind,
 Table, TableData, Type, Value, ValueLoc,
@@ -334,14 +334,14 @@ impl<'a> Context<'a> {
 }
 }

- // Allocate a new EBB.
- fn add_ebb(&mut self, ebb: Ebb, loc: Location) -> ParseResult<Ebb> {
- self.map.def_ebb(ebb, loc)?;
- while self.function.dfg.num_ebbs() <= ebb.index() {
- self.function.dfg.make_ebb();
+ // Allocate a new block.
+ fn add_block(&mut self, block: Block, loc: Location) -> ParseResult<Block> {
+ self.map.def_block(block, loc)?;
+ while self.function.dfg.num_blocks() <= block.index() {
+ self.function.dfg.make_block();
 }
- self.function.layout.append_ebb(ebb);
- Ok(ebb)
+ self.function.layout.append_block(block);
+ Ok(block)
 }
}

@@ -554,11 +554,11 @@ impl<'a> Parser<'a> {
 err!(self.loc, "expected jump table number: jt«n»")
 }

- // Match and consume an ebb reference.
- fn match_ebb(&mut self, err_msg: &str) -> ParseResult<Ebb> {
- if let Some(Token::Ebb(ebb)) = self.token() {
+ // Match and consume a block reference.
+ fn match_block(&mut self, err_msg: &str) -> ParseResult<Block> {
+ if let Some(Token::Block(block)) = self.token() {
 self.consume();
- Ok(ebb)
+ Ok(block)
 } else {
 err!(self.loc, err_msg)
 }
@@ -1686,9 +1686,9 @@ impl<'a> Parser<'a> {
 let mut data = JumpTableData::new();

- // jump-table-decl ::= JumpTable(jt) "=" "jump_table" "[" * Ebb(dest) {"," Ebb(dest)} "]"
+ // jump-table-decl ::= JumpTable(jt) "=" "jump_table" "[" * Block(dest) {"," Block(dest)} "]"
 match self.token() {
- Some(Token::Ebb(dest)) => {
+ Some(Token::Block(dest)) => {
 self.consume();
 data.push_entry(dest);

@@ -1696,7 +1696,7 @@
 match self.token() {
 Some(Token::Comma) => {
 self.consume();
- if let Some(Token::Ebb(dest)) = self.token() {
+ if let Some(Token::Block(dest)) = self.token() {
 self.consume();
 data.push_entry(dest);
 } else {
@@ -1727,13 +1727,13 @@
 //
 fn parse_function_body(&mut self, ctx: &mut Context) -> ParseResult<()> {
 while self.token() != Some(Token::RBrace) {
- self.parse_extended_basic_block(ctx)?;
+ self.parse_basic_block(ctx)?;
 }

 // Now that we've seen all defined values in the function, ensure that
 // all references refer to a definition.
- for ebb in &ctx.function.layout {
- for inst in ctx.function.layout.ebb_insts(ebb) {
+ for block in &ctx.function.layout {
+ for inst in ctx.function.layout.block_insts(block) {
 for value in ctx.function.dfg.inst_args(inst) {
 if !ctx.map.contains_value(*value) {
 return err!(
@@ -1756,29 +1756,29 @@
 Ok(())
 }

- // Parse an extended basic block, add contents to `ctx`.
+ // Parse a basic block, add contents to `ctx`.
 //
- // extended-basic-block ::= * ebb-header { instruction }
- // ebb-header ::= Ebb(ebb) [ebb-params] ":"
+ // extended-basic-block ::= * block-header { instruction }
+ // block-header ::= Block(block) [block-params] ":"
 //
- fn parse_extended_basic_block(&mut self, ctx: &mut Context) -> ParseResult<()> {
- // Collect comments for the next ebb.
+ fn parse_basic_block(&mut self, ctx: &mut Context) -> ParseResult<()> {
+ // Collect comments for the next block.
 self.start_gathering_comments();

- let ebb_num = self.match_ebb("expected EBB header")?;
- let ebb = ctx.add_ebb(ebb_num, self.loc)?;
+ let block_num = self.match_block("expected block header")?;
+ let block = ctx.add_block(block_num, self.loc)?;

 if !self.optional(Token::Colon) {
- // ebb-header ::= Ebb(ebb) [ * ebb-params ] ":"
- self.parse_ebb_params(ctx, ebb)?;
- self.match_token(Token::Colon, "expected ':' after EBB parameters")?;
+ // block-header ::= Block(block) [ * block-params ] ":"
+ self.parse_block_params(ctx, block)?;
+ self.match_token(Token::Colon, "expected ':' after block parameters")?;
 }

 // Collect any trailing comments.
self.token(); - self.claim_gathered_comments(ebb); + self.claim_gathered_comments(block); - // extended-basic-block ::= ebb-header * { instruction } + // extended-basic-block ::= block-header * { instruction } while match self.token() { Some(Token::Value(_)) | Some(Token::Identifier(_)) @@ -1808,64 +1808,76 @@ impl<'a> Parser<'a> { } Some(Token::Equal) => { self.consume(); - self.parse_instruction(&results, srcloc, encoding, result_locations, ctx, ebb)?; + self.parse_instruction( + &results, + srcloc, + encoding, + result_locations, + ctx, + block, + )?; } _ if !results.is_empty() => return err!(self.loc, "expected -> or ="), - _ => { - self.parse_instruction(&results, srcloc, encoding, result_locations, ctx, ebb)? - } + _ => self.parse_instruction( + &results, + srcloc, + encoding, + result_locations, + ctx, + block, + )?, } } Ok(()) } - // Parse parenthesized list of EBB parameters. Returns a vector of (u32, Type) pairs with the + // Parse parenthesized list of block parameters. Returns a vector of (u32, Type) pairs with the // value numbers of the defined values and the defined types. // - // ebb-params ::= * "(" ebb-param { "," ebb-param } ")" - fn parse_ebb_params(&mut self, ctx: &mut Context, ebb: Ebb) -> ParseResult<()> { - // ebb-params ::= * "(" ebb-param { "," ebb-param } ")" - self.match_token(Token::LPar, "expected '(' before EBB parameters")?; + // block-params ::= * "(" block-param { "," block-param } ")" + fn parse_block_params(&mut self, ctx: &mut Context, block: Block) -> ParseResult<()> { + // block-params ::= * "(" block-param { "," block-param } ")" + self.match_token(Token::LPar, "expected '(' before block parameters")?; - // ebb-params ::= "(" * ebb-param { "," ebb-param } ")" - self.parse_ebb_param(ctx, ebb)?; + // block-params ::= "(" * block-param { "," block-param } ")" + self.parse_block_param(ctx, block)?; - // ebb-params ::= "(" ebb-param * { "," ebb-param } ")" + // block-params ::= "(" block-param * { "," block-param } ")" while self.optional(Token::Comma) { - // ebb-params ::= "(" ebb-param { "," * ebb-param } ")" - self.parse_ebb_param(ctx, ebb)?; + // block-params ::= "(" block-param { "," * block-param } ")" + self.parse_block_param(ctx, block)?; } - // ebb-params ::= "(" ebb-param { "," ebb-param } * ")" - self.match_token(Token::RPar, "expected ')' after EBB parameters")?; + // block-params ::= "(" block-param { "," block-param } * ")" + self.match_token(Token::RPar, "expected ')' after block parameters")?; Ok(()) } - // Parse a single EBB parameter declaration, and append it to `ebb`. + // Parse a single block parameter declaration, and append it to `block`. // - // ebb-param ::= * Value(v) ":" Type(t) arg-loc? + // block-param ::= * Value(v) ":" Type(t) arg-loc? // arg-loc ::= "[" value-location "]" // - fn parse_ebb_param(&mut self, ctx: &mut Context, ebb: Ebb) -> ParseResult<()> { - // ebb-param ::= * Value(v) ":" Type(t) arg-loc? - let v = self.match_value("EBB argument must be a value")?; + fn parse_block_param(&mut self, ctx: &mut Context, block: Block) -> ParseResult<()> { + // block-param ::= * Value(v) ":" Type(t) arg-loc? + let v = self.match_value("block argument must be a value")?; let v_location = self.loc; - // ebb-param ::= Value(v) * ":" Type(t) arg-loc? - self.match_token(Token::Colon, "expected ':' after EBB argument")?; - // ebb-param ::= Value(v) ":" * Type(t) arg-loc? + // block-param ::= Value(v) * ":" Type(t) arg-loc? 
+ self.match_token(Token::Colon, "expected ':' after block argument")?; + // block-param ::= Value(v) ":" * Type(t) arg-loc? while ctx.function.dfg.num_values() <= v.index() { ctx.function.dfg.make_invalid_value_for_parser(); } - let t = self.match_type("expected EBB argument type")?; - // Allocate the EBB argument. - ctx.function.dfg.append_ebb_param_for_parser(ebb, t, v); + let t = self.match_type("expected block argument type")?; + // Allocate the block argument. + ctx.function.dfg.append_block_param_for_parser(block, t, v); ctx.map.def_value(v, v_location)?; - // ebb-param ::= Value(v) ":" Type(t) * arg-loc? + // block-param ::= Value(v) ":" Type(t) * arg-loc? if self.optional(Token::LBracket) { let loc = self.parse_value_location(ctx)?; ctx.function.locations[v] = loc; @@ -1981,7 +1993,7 @@ impl<'a> Parser<'a> { Ok(results) } - // Parse a value alias, and append it to `ebb`. + // Parse a value alias, and append it to `block`. // // value_alias ::= [inst-results] "->" Value(v) // @@ -2022,7 +2034,7 @@ impl<'a> Parser<'a> { Ok(()) } - // Parse an instruction, append it to `ebb`. + // Parse an instruction, append it to `block`. // // instruction ::= [inst-results "="] Opcode(opc) ["." Type] ... // @@ -2033,7 +2045,7 @@ impl<'a> Parser<'a> { encoding: Option, result_locations: Option>, ctx: &mut Context, - ebb: Ebb, + block: Block, ) -> ParseResult<()> { // Define the result values. for val in results { @@ -2077,7 +2089,7 @@ impl<'a> Parser<'a> { ctx.function .dfg .make_inst_results_for_parser(inst, ctrl_typevar, results); - ctx.function.layout.append_inst(inst, ebb); + ctx.function.layout.append_inst(inst, block); ctx.map .def_entity(inst.into(), opcode_loc) .expect("duplicate inst references created"); @@ -2345,23 +2357,23 @@ impl<'a> Parser<'a> { } InstructionFormat::NullAry => InstructionData::NullAry { opcode }, InstructionFormat::Jump => { - // Parse the destination EBB number. - let ebb_num = self.match_ebb("expected jump destination EBB")?; + // Parse the destination block number. 
+ let block_num = self.match_block("expected jump destination block")?; let args = self.parse_opt_value_list()?; InstructionData::Jump { opcode, - destination: ebb_num, + destination: block_num, args: args.into_value_list(&[], &mut ctx.function.dfg.value_lists), } } InstructionFormat::Branch => { let ctrl_arg = self.match_value("expected SSA value control operand")?; self.match_token(Token::Comma, "expected ',' between operands")?; - let ebb_num = self.match_ebb("expected branch destination EBB")?; + let block_num = self.match_block("expected branch destination block")?; let args = self.parse_opt_value_list()?; InstructionData::Branch { opcode, - destination: ebb_num, + destination: block_num, args: args.into_value_list(&[ctrl_arg], &mut ctx.function.dfg.value_lists), } } @@ -2369,12 +2381,12 @@ impl<'a> Parser<'a> { let cond = self.match_enum("expected intcc condition code")?; let arg = self.match_value("expected SSA value first operand")?; self.match_token(Token::Comma, "expected ',' between operands")?; - let ebb_num = self.match_ebb("expected branch destination EBB")?; + let block_num = self.match_block("expected branch destination block")?; let args = self.parse_opt_value_list()?; InstructionData::BranchInt { opcode, cond, - destination: ebb_num, + destination: block_num, args: args.into_value_list(&[arg], &mut ctx.function.dfg.value_lists), } } @@ -2382,12 +2394,12 @@ impl<'a> Parser<'a> { let cond = self.match_enum("expected floatcc condition code")?; let arg = self.match_value("expected SSA value first operand")?; self.match_token(Token::Comma, "expected ',' between operands")?; - let ebb_num = self.match_ebb("expected branch destination EBB")?; + let block_num = self.match_block("expected branch destination block")?; let args = self.parse_opt_value_list()?; InstructionData::BranchFloat { opcode, cond, - destination: ebb_num, + destination: block_num, args: args.into_value_list(&[arg], &mut ctx.function.dfg.value_lists), } } @@ -2397,26 +2409,26 @@ impl<'a> Parser<'a> { self.match_token(Token::Comma, "expected ',' between operands")?; let rhs = self.match_value("expected SSA value second operand")?; self.match_token(Token::Comma, "expected ',' between operands")?; - let ebb_num = self.match_ebb("expected branch destination EBB")?; + let block_num = self.match_block("expected branch destination block")?; let args = self.parse_opt_value_list()?; InstructionData::BranchIcmp { opcode, cond, - destination: ebb_num, + destination: block_num, args: args.into_value_list(&[lhs, rhs], &mut ctx.function.dfg.value_lists), } } InstructionFormat::BranchTable => { let arg = self.match_value("expected SSA value operand")?; self.match_token(Token::Comma, "expected ',' between operands")?; - let ebb_num = self.match_ebb("expected branch destination EBB")?; + let block_num = self.match_block("expected branch destination block")?; self.match_token(Token::Comma, "expected ',' between operands")?; let table = self.match_jt()?; ctx.check_jt(table, self.loc)?; InstructionData::BranchTable { opcode, arg, - destination: ebb_num, + destination: block_num, table, } } @@ -2810,7 +2822,7 @@ mod tests { fn aliases() { let (func, details) = Parser::new( "function %qux() system_v { - ebb0: + block0: v4 = iconst.i8 6 v3 -> v4 v1 = iadd_imm v3, 17 @@ -2925,45 +2937,45 @@ mod tests { } #[test] - fn ebb_header() { + fn block_header() { let (func, _) = Parser::new( - "function %ebbs() system_v { - ebb0: - ebb4(v3: i32): + "function %blocks() system_v { + block0: + block4(v3: i32): }", ) .parse_function(None) .unwrap(); 
- assert_eq!(func.name.to_string(), "%ebbs"); + assert_eq!(func.name.to_string(), "%blocks"); - let mut ebbs = func.layout.ebbs(); + let mut blocks = func.layout.blocks(); - let ebb0 = ebbs.next().unwrap(); - assert_eq!(func.dfg.ebb_params(ebb0), &[]); + let block0 = blocks.next().unwrap(); + assert_eq!(func.dfg.block_params(block0), &[]); - let ebb4 = ebbs.next().unwrap(); - let ebb4_args = func.dfg.ebb_params(ebb4); - assert_eq!(ebb4_args.len(), 1); - assert_eq!(func.dfg.value_type(ebb4_args[0]), types::I32); + let block4 = blocks.next().unwrap(); + let block4_args = func.dfg.block_params(block4); + assert_eq!(block4_args.len(), 1); + assert_eq!(func.dfg.value_type(block4_args[0]), types::I32); } #[test] - fn duplicate_ebb() { + fn duplicate_block() { let ParseError { location, message, is_warning, } = Parser::new( - "function %ebbs() system_v { - ebb0: - ebb0: + "function %blocks() system_v { + block0: + block0: return 2", ) .parse_function(None) .unwrap_err(); assert_eq!(location.line_number, 3); - assert_eq!(message, "duplicate entity: ebb0"); + assert_eq!(message, "duplicate entity: block0"); assert!(!is_warning); } @@ -2974,7 +2986,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { jt0 = jump_table [] jt0 = jump_table []", ) @@ -2993,7 +3005,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { ss0 = explicit_slot 8 ss0 = explicit_slot 8", ) @@ -3012,7 +3024,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { gv0 = vmctx gv0 = vmctx", ) @@ -3031,7 +3043,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { heap0 = static gv0, min 0x1000, bound 0x10_0000, offset_guard 0x1000 heap0 = static gv0, min 0x1000, bound 0x10_0000, offset_guard 0x1000", ) @@ -3050,7 +3062,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { sig0 = () sig0 = ()", ) @@ -3069,7 +3081,7 @@ mod tests { message, is_warning, } = Parser::new( - "function %ebbs() system_v { + "function %blocks() system_v { sig0 = () fn0 = %foo sig0 fn0 = %foo sig0", @@ -3089,9 +3101,9 @@ mod tests { function %comment() system_v { ; decl ss10 = outgoing_arg 13 ; stackslot. ; Still stackslot. - jt10 = jump_table [ebb0] + jt10 = jump_table [block0] ; Jumptable - ebb0: ; Basic block + block0: ; Basic block trap user42; Instruction } ; Trailing. 
; More trailing.", @@ -3112,7 +3124,7 @@ mod tests { assert_eq!(comments[2].text, "; Still stackslot."); assert_eq!(comments[3].entity.to_string(), "jt10"); assert_eq!(comments[3].text, "; Jumptable"); - assert_eq!(comments[4].entity.to_string(), "ebb0"); + assert_eq!(comments[4].entity.to_string(), "block0"); assert_eq!(comments[4].text, "; Basic block"); assert_eq!(comments[5].entity.to_string(), "inst0"); @@ -3195,7 +3207,7 @@ mod tests { // Valid characters in the name: let func = Parser::new( "function u1:2() system_v { - ebb0: + block0: trap int_divz }", ) @@ -3207,7 +3219,7 @@ mod tests { // Invalid characters in the name: let mut parser = Parser::new( "function u123:abc() system_v { - ebb0: + block0: trap stk_ovf }", ); @@ -3216,7 +3228,7 @@ mod tests { // Incomplete function names should not be valid: let mut parser = Parser::new( "function u() system_v { - ebb0: + block0: trap int_ovf }", ); @@ -3224,7 +3236,7 @@ mod tests { let mut parser = Parser::new( "function u0() system_v { - ebb0: + block0: trap int_ovf }", ); @@ -3232,7 +3244,7 @@ mod tests { let mut parser = Parser::new( "function u0:() system_v { - ebb0: + block0: trap int_ovf }", ); @@ -3242,7 +3254,7 @@ mod tests { #[test] fn change_default_calling_convention() { let code = "function %test() { - ebb0: + block0: return }"; diff --git a/cranelift/reader/src/sourcemap.rs b/cranelift/reader/src/sourcemap.rs index 6291d0cd76..126fd219c3 100644 --- a/cranelift/reader/src/sourcemap.rs +++ b/cranelift/reader/src/sourcemap.rs @@ -1,7 +1,7 @@ //! Source map associating entities with their source locations. //! //! When the parser reads in a source file, it records the locations of the -//! definitions of entities like instructions, EBBs, and values. +//! definitions of entities like instructions, blocks, and values. //! //! The `SourceMap` struct defined in this module makes this mapping available //! to parser clients. @@ -10,7 +10,7 @@ use crate::error::{Location, ParseResult}; use crate::lexer::split_entity_name; use cranelift_codegen::ir::entities::AnyEntity; use cranelift_codegen::ir::{ - Ebb, FuncRef, GlobalValue, Heap, JumpTable, SigRef, StackSlot, Table, Value, + Block, FuncRef, GlobalValue, Heap, JumpTable, SigRef, StackSlot, Table, Value, }; use std::collections::HashMap; @@ -28,9 +28,9 @@ impl SourceMap { self.locations.contains_key(&v.into()) } - /// Look up a EBB entity. - pub fn contains_ebb(&self, ebb: Ebb) -> bool { - self.locations.contains_key(&ebb.into()) + /// Look up a block entity. + pub fn contains_block(&self, block: Block) -> bool { + self.locations.contains_key(&block.into()) } /// Look up a stack slot entity. @@ -79,11 +79,11 @@ impl SourceMap { Some(v.into()) } }), - "ebb" => Ebb::with_number(num).and_then(|ebb| { - if !self.contains_ebb(ebb) { + "block" => Block::with_number(num).and_then(|block| { + if !self.contains_block(block) { None } else { - Some(ebb.into()) + Some(block.into()) } }), "ss" => StackSlot::with_number(num).and_then(|ss| { @@ -158,8 +158,8 @@ impl SourceMap { self.def_entity(entity.into(), loc) } - /// Define the ebb `entity`. - pub fn def_ebb(&mut self, entity: Ebb, loc: Location) -> ParseResult<()> { + /// Define the block `entity`. 
+ pub fn def_block(&mut self, entity: Block, loc: Location) -> ParseResult<()> {
 self.def_entity(entity.into(), loc)
 }

@@ -218,8 +218,8 @@ mod tests {
 let tf = parse_test(
 "function %detail() {
 ss10 = incoming_arg 13
- jt10 = jump_table [ebb0]
- ebb0(v4: i32, v7: i32):
+ jt10 = jump_table [block0]
+ block0(v4: i32, v7: i32):
 v10 = iadd v4, v7
 }",
 ParseOptions::default(),
 )
@@ -231,7 +231,7 @@
 assert_eq!(map.lookup_str("ss1"), None);
 assert_eq!(map.lookup_str("ss10").unwrap().to_string(), "ss10");
 assert_eq!(map.lookup_str("jt10").unwrap().to_string(), "jt10");
- assert_eq!(map.lookup_str("ebb0").unwrap().to_string(), "ebb0");
+ assert_eq!(map.lookup_str("block0").unwrap().to_string(), "block0");
 assert_eq!(map.lookup_str("v4").unwrap().to_string(), "v4");
 assert_eq!(map.lookup_str("v7").unwrap().to_string(), "v7");
 assert_eq!(map.lookup_str("v10").unwrap().to_string(), "v10");
diff --git a/cranelift/reader/src/testfile.rs b/cranelift/reader/src/testfile.rs
index 68c7d30a93..4cfdd8f3e5 100644
--- a/cranelift/reader/src/testfile.rs
+++ b/cranelift/reader/src/testfile.rs
@@ -44,7 +44,7 @@ pub struct Details<'a> {

 /// A comment in a parsed function.
 ///
-/// The comment belongs to the immediately preceding entity, whether that is an EBB header, and
+/// The comment belongs to the immediately preceding entity, whether that is a block header, an
 /// instruction, or one of the preamble declarations.
 ///
 /// Comments appearing inside the function but before the preamble, as well as comments appearing
diff --git a/cranelift/serde/src/serde_clif_json.rs b/cranelift/serde/src/serde_clif_json.rs
index 0d19ee5fa0..2d950cf3a8 100644
--- a/cranelift/serde/src/serde_clif_json.rs
+++ b/cranelift/serde/src/serde_clif_json.rs
@@ -1,4 +1,4 @@
-use cranelift_codegen::ir::{Ebb, Function, Inst, InstructionData, Signature};
+use cranelift_codegen::ir::{Block, Function, Inst, InstructionData, Signature};
 use serde_derive::{Deserialize, Serialize};

 /// Serializable version of the original Cranelift IR
@@ -758,27 +758,27 @@ impl SerInst {
 }
 }

-/// Serializable version of Cranelift IR Ebbs.
+/// Serializable version of Cranelift IR Blocks.
 #[derive(Clone, Deserialize, Serialize, Debug)]
-pub struct SerEbb {
- pub ebb: String,
+pub struct SerBlock {
+ pub block: String,
 pub params: Vec<String>,
 pub insts: Vec<SerInst>,
 }

-impl SerEbb {
+impl SerBlock {
 pub fn new(name: String) -> Self {
 Self {
- ebb: name,
+ block: name,
 params: Vec::new(),
 insts: Vec::new(),
 }
 }
 }

-pub fn populate_inst(func: &Function, ebb: Ebb) -> Vec<SerInst> {
+pub fn populate_inst(func: &Function, block: Block) -> Vec<SerInst> {
 let mut ser_vec: Vec<SerInst> = Vec::new();
- let ret_iter = func.layout.ebb_insts(ebb);
+ let ret_iter = func.layout.block_insts(block);
 for inst in ret_iter {
 let ser_inst: SerInst = SerInst::new(inst, &func);
 ser_vec.push(ser_inst);
@@ -786,10 +786,10 @@
 ser_vec
 }

-/// Translating Ebb parameters into serializable parameters.
-pub fn populate_params(func: &Function, ebb: Ebb) -> Vec<String> {
+/// Translating Block parameters into serializable parameters.
+pub fn populate_params(func: &Function, block: Block) -> Vec<String> {
 let mut ser_vec: Vec<String> = Vec::new();
- let parameters = func.dfg.ebb_params(ebb);
+ let parameters = func.dfg.block_params(block);
 for param in parameters {
 ser_vec.push(param.to_string());
 }
@@ -799,27 +799,27 @@
 /// Serializable Data Flow Graph.
#[derive(Deserialize, Serialize, Debug)]
pub struct SerDataFlowGraph {
- ebbs: Vec<SerEbb>,
+ blocks: Vec<SerBlock>,
}

-/// Serialize all parts of the Cranelift Ebb data structure, this includes name, parameters, and
+/// Serialize all parts of the Cranelift Block data structure; this includes name, parameters, and
 /// instructions.
-pub fn populate_ebbs(func: &Function) -> Vec<SerEbb> {
- let mut ebb_vec: Vec<SerEbb> = Vec::new();
- for ebb in func.layout.ebbs() {
- let mut ser_ebb: SerEbb = SerEbb::new(ebb.to_string());
- ser_ebb.params = populate_params(&func, ebb);
- ser_ebb.insts = populate_inst(&func, ebb);
- ebb_vec.push(ser_ebb);
+pub fn populate_blocks(func: &Function) -> Vec<SerBlock> {
+ let mut block_vec: Vec<SerBlock> = Vec::new();
+ for block in func.layout.blocks() {
+ let mut ser_block: SerBlock = SerBlock::new(block.to_string());
+ ser_block.params = populate_params(&func, block);
+ ser_block.insts = populate_inst(&func, block);
+ block_vec.push(ser_block);
 }
- ebb_vec
+ block_vec
}

-/// Serializable Cranelift IR data flow graph, including all ebbs.
+/// Serializable Cranelift IR data flow graph, including all blocks.
 impl SerDataFlowGraph {
 pub fn create_new(func: &Function) -> Self {
 Self {
- ebbs: populate_ebbs(func),
+ blocks: populate_blocks(func),
 }
 }

diff --git a/cranelift/simplejit/examples/simplejit-minimal.rs b/cranelift/simplejit/examples/simplejit-minimal.rs
index 3b8e147830..ade2cd3422 100644
--- a/cranelift/simplejit/examples/simplejit-minimal.rs
+++ b/cranelift/simplejit/examples/simplejit-minimal.rs
@@ -27,11 +27,11 @@ fn main() {
 ctx.func.name = ExternalName::user(0, func_a.as_u32());
 {
 let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
- let ebb = bcx.create_ebb();
+ let block = bcx.create_block();

- bcx.switch_to_block(ebb);
- bcx.append_ebb_params_for_function_params(ebb);
- let param = bcx.ebb_params(ebb)[0];
+ bcx.switch_to_block(block);
+ bcx.append_block_params_for_function_params(block);
+ let param = bcx.block_params(block)[0];
 let cst = bcx.ins().iconst(types::I32, 37);
 let add = bcx.ins().iadd(cst, param);
 bcx.ins().return_(&[add]);
@@ -45,9 +45,9 @@ fn main() {
 ctx.func.name = ExternalName::user(0, func_b.as_u32());
 {
 let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
- let ebb = bcx.create_ebb();
+ let block = bcx.create_block();

- bcx.switch_to_block(ebb);
+ bcx.switch_to_block(block);
 let local_func = module.declare_func_in_func(func_a, &mut bcx.func);
 let arg = bcx.ins().iconst(types::I32, 5);
 let call = bcx.ins().call(local_func, &[arg]);
diff --git a/cranelift/simplejit/src/backend.rs b/cranelift/simplejit/src/backend.rs
index 919a4ffe3f..45c7d3d260 100644
--- a/cranelift/simplejit/src/backend.rs
+++ b/cranelift/simplejit/src/backend.rs
@@ -582,7 +582,7 @@ impl SimpleJITRelocSink {
 }

 impl RelocSink for SimpleJITRelocSink {
- fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) {
+ fn reloc_block(&mut self, _offset: CodeOffset, _reloc: Reloc, _block_offset: CodeOffset) {
 unimplemented!();
 }

diff --git a/cranelift/simplejit/tests/basic.rs b/cranelift/simplejit/tests/basic.rs
index a3932b1d6a..0e8ea0aa9d 100644
--- a/cranelift/simplejit/tests/basic.rs
+++ b/cranelift/simplejit/tests/basic.rs
@@ -41,8 +41,8 @@ fn define_simple_function(module: &mut Module<SimpleJITBackend>) -> FuncId {
 let mut func_ctx = FunctionBuilderContext::new();
 {
 let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
- let ebb = bcx.create_ebb();
- bcx.switch_to_block(ebb);
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
 bcx.ins().return_(&[]);
 }

@@ -90,16 +90,16 @@ fn switch_error() {
 let mut func_ctx = FunctionBuilderContext::new();
 {
 let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut func, &mut func_ctx);
- let start = bcx.create_ebb();
- let bb0 = bcx.create_ebb();
- let bb1 = bcx.create_ebb();
- let bb2 = bcx.create_ebb();
- let bb3 = bcx.create_ebb();
+ let start = bcx.create_block();
+ let bb0 = bcx.create_block();
+ let bb1 = bcx.create_block();
+ let bb2 = bcx.create_block();
+ let bb3 = bcx.create_block();
 println!("{} {} {} {} {}", start, bb0, bb1, bb2, bb3);
 bcx.declare_var(Variable::new(0), types::I32);
 bcx.declare_var(Variable::new(1), types::I32);
- let in_val = bcx.append_ebb_param(start, types::I32);
+ let in_val = bcx.append_block_param(start, types::I32);
 bcx.switch_to_block(start);
 bcx.def_var(Variable::new(0), in_val);
 bcx.ins().jump(bb0, &[]);
@@ -168,8 +168,8 @@ fn libcall_function() {
 let mut func_ctx = FunctionBuilderContext::new();
 {
 let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
- let ebb = bcx.create_ebb();
- bcx.switch_to_block(ebb);
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
 let int = module.target_config().pointer_type();
 let zero = bcx.ins().iconst(I16, 0);
diff --git a/cranelift/src/bugpoint.rs b/cranelift/src/bugpoint.rs
index 5d075e0001..b409143be4 100644
--- a/cranelift/src/bugpoint.rs
+++ b/cranelift/src/bugpoint.rs
@@ -6,8 +6,8 @@ use cranelift_codegen::cursor::{Cursor, FuncCursor};
 use cranelift_codegen::flowgraph::ControlFlowGraph;
 use cranelift_codegen::ir::types::{F32, F64};
 use cranelift_codegen::ir::{
- self, Ebb, FuncRef, Function, GlobalValueData, Inst, InstBuilder, InstructionData, StackSlots,
- TrapCode,
+ self, Block, FuncRef, Function, GlobalValueData, Inst, InstBuilder, InstructionData,
+ StackSlots, TrapCode,
};
 use cranelift_codegen::isa::TargetIsa;
 use cranelift_codegen::Context;
@@ -46,17 +46,17 @@ pub fn run(
 std::env::set_var("RUST_BACKTRACE", "0"); // Disable backtraces to reduce verbosity

 for (func, _) in test_file.functions {
- let (orig_ebb_count, orig_inst_count) = (ebb_count(&func), inst_count(&func));
+ let (orig_block_count, orig_inst_count) = (block_count(&func), inst_count(&func));

 match reduce(isa, func, verbose) {
 Ok((func, crash_msg)) => {
 println!("Crash message: {}", crash_msg);
 println!("\n{}", func);
 println!(
- "{} ebbs {} insts -> {} ebbs {} insts",
- orig_ebb_count,
+ "{} blocks {} insts -> {} blocks {} insts",
+ orig_block_count,
 orig_inst_count,
- ebb_count(&func),
+ block_count(&func),
 inst_count(&func)
 );
 }
@@ -68,7 +68,7 @@ pub fn run(
}

enum ProgressStatus {
- /// The mutation raised or reduced the amount of instructions or ebbs.
+ /// The mutation raised or reduced the number of instructions or blocks.
 ExpandedOrShrinked,

 /// The mutation only changed an instruction. Performing another round of mutations may only
@@ -92,16 +92,16 @@ trait Mutator {

 /// Try to remove instructions.
struct RemoveInst { - ebb: Ebb, + block: Block, inst: Inst, } impl RemoveInst { fn new(func: &Function) -> Self { - let first_ebb = func.layout.entry_block().unwrap(); - let first_inst = func.layout.first_inst(first_ebb).unwrap(); + let first_block = func.layout.entry_block().unwrap(); + let first_inst = func.layout.first_inst(first_block).unwrap(); Self { - ebb: first_ebb, + block: first_block, inst: first_inst, } } @@ -117,12 +117,12 @@ impl Mutator for RemoveInst { } fn mutate(&mut self, mut func: Function) -> Option<(Function, String, ProgressStatus)> { - next_inst_ret_prev(&func, &mut self.ebb, &mut self.inst).map(|(prev_ebb, prev_inst)| { + next_inst_ret_prev(&func, &mut self.block, &mut self.inst).map(|(prev_block, prev_inst)| { func.layout.remove_inst(prev_inst); - let msg = if func.layout.ebb_insts(prev_ebb).next().is_none() { - // Make sure empty ebbs are removed, as `next_inst_ret_prev` depends on non empty ebbs - func.layout.remove_ebb(prev_ebb); - format!("Remove inst {} and empty ebb {}", prev_inst, prev_ebb) + let msg = if func.layout.block_insts(prev_block).next().is_none() { + // Make sure empty blocks are removed, as `next_inst_ret_prev` depends on non empty blocks + func.layout.remove_block(prev_block); + format!("Remove inst {} and empty block {}", prev_inst, prev_block) } else { format!("Remove inst {}", prev_inst) }; @@ -133,16 +133,16 @@ impl Mutator for RemoveInst { /// Try to replace instructions with `iconst` or `fconst`. struct ReplaceInstWithConst { - ebb: Ebb, + block: Block, inst: Inst, } impl ReplaceInstWithConst { fn new(func: &Function) -> Self { - let first_ebb = func.layout.entry_block().unwrap(); - let first_inst = func.layout.first_inst(first_ebb).unwrap(); + let first_block = func.layout.entry_block().unwrap(); + let first_inst = func.layout.first_inst(first_block).unwrap(); Self { - ebb: first_ebb, + block: first_block, inst: first_inst, } } @@ -174,71 +174,73 @@ impl Mutator for ReplaceInstWithConst { } fn mutate(&mut self, mut func: Function) -> Option<(Function, String, ProgressStatus)> { - next_inst_ret_prev(&func, &mut self.ebb, &mut self.inst).map(|(_prev_ebb, prev_inst)| { - let num_results = func.dfg.inst_results(prev_inst).len(); + next_inst_ret_prev(&func, &mut self.block, &mut self.inst).map( + |(_prev_block, prev_inst)| { + let num_results = func.dfg.inst_results(prev_inst).len(); - let opcode = func.dfg[prev_inst].opcode(); - if num_results == 0 - || opcode == ir::Opcode::Iconst - || opcode == ir::Opcode::F32const - || opcode == ir::Opcode::F64const - { - return (func, format!(""), ProgressStatus::Skip); - } + let opcode = func.dfg[prev_inst].opcode(); + if num_results == 0 + || opcode == ir::Opcode::Iconst + || opcode == ir::Opcode::F32const + || opcode == ir::Opcode::F64const + { + return (func, format!(""), ProgressStatus::Skip); + } - if num_results == 1 { - let ty = func.dfg.value_type(func.dfg.first_result(prev_inst)); - let new_inst_name = Self::const_for_type(func.dfg.replace(prev_inst), ty); - return ( + if num_results == 1 { + let ty = func.dfg.value_type(func.dfg.first_result(prev_inst)); + let new_inst_name = Self::const_for_type(func.dfg.replace(prev_inst), ty); + return ( + func, + format!("Replace inst {} with {}.", prev_inst, new_inst_name), + ProgressStatus::Changed, + ); + } + + // At least 2 results. Replace each instruction with as many const instructions as + // there are results. 
+ let mut pos = FuncCursor::new(&mut func).at_inst(prev_inst);
+
+ // Copy result SSA names into our own vector; otherwise we couldn't mutably borrow pos
+ // in the loop below.
+ let results = pos.func.dfg.inst_results(prev_inst).to_vec();
+
+ // Detach results from the previous instruction, since we're going to reuse them.
+ pos.func.dfg.clear_results(prev_inst);
+
+ let mut inst_names = Vec::new();
+ for r in results {
+ let ty = pos.func.dfg.value_type(r);
+ let builder = pos.ins().with_results([Some(r)]);
+ let new_inst_name = Self::const_for_type(builder, ty);
+ inst_names.push(new_inst_name);
+ }
+
+ // Remove the instruction.
+ assert_eq!(pos.remove_inst(), prev_inst);
+
+ (
 func,
- format!("Replace inst {} with {}.", prev_inst, new_inst_name),
- ProgressStatus::Changed,
- );
- }
-
- // At least 2 results. Replace each instruction with as many const instructions as
- // there are results.
- let mut pos = FuncCursor::new(&mut func).at_inst(prev_inst);
-
- // Copy result SSA names into our own vector; otherwise we couldn't mutably borrow pos
- // in the loop below.
- let results = pos.func.dfg.inst_results(prev_inst).to_vec();
-
- // Detach results from the previous instruction, since we're going to reuse them.
- pos.func.dfg.clear_results(prev_inst);
-
- let mut inst_names = Vec::new();
- for r in results {
- let ty = pos.func.dfg.value_type(r);
- let builder = pos.ins().with_results([Some(r)]);
- let new_inst_name = Self::const_for_type(builder, ty);
- inst_names.push(new_inst_name);
- }
-
- // Remove the instruction.
- assert_eq!(pos.remove_inst(), prev_inst);
-
- (
- func,
- format!("Replace inst {} with {}", prev_inst, inst_names.join(" / ")),
- ProgressStatus::ExpandedOrShrinked,
- )
- })
+ format!("Replace inst {} with {}", prev_inst, inst_names.join(" / ")),
+ ProgressStatus::ExpandedOrShrinked,
+ )
+ },
+ )
 }
}

/// Try to replace instructions with `trap`.
struct ReplaceInstWithTrap {
- ebb: Ebb,
+ block: Block,
 inst: Inst,
}

impl ReplaceInstWithTrap {
 fn new(func: &Function) -> Self {
- let first_ebb = func.layout.entry_block().unwrap();
- let first_inst = func.layout.first_inst(first_ebb).unwrap();
+ let first_block = func.layout.entry_block().unwrap();
+ let first_inst = func.layout.first_inst(first_block).unwrap();
 Self {
- ebb: first_ebb,
+ block: first_block,
 inst: first_inst,
 }
 }
@@ -254,54 +256,56 @@ impl Mutator for ReplaceInstWithTrap {
 }

 fn mutate(&mut self, mut func: Function) -> Option<(Function, String, ProgressStatus)> {
- next_inst_ret_prev(&func, &mut self.ebb, &mut self.inst).map(|(_prev_ebb, prev_inst)| {
- let status = if func.dfg[prev_inst].opcode() == ir::Opcode::Trap {
- ProgressStatus::Skip
- } else {
- func.dfg.replace(prev_inst).trap(TrapCode::User(0));
- ProgressStatus::Changed
- };
- (
- func,
- format!("Replace inst {} with trap", prev_inst),
- status,
- )
- })
+ next_inst_ret_prev(&func, &mut self.block, &mut self.inst).map(
+ |(_prev_block, prev_inst)| {
+ let status = if func.dfg[prev_inst].opcode() == ir::Opcode::Trap {
+ ProgressStatus::Skip
+ } else {
+ func.dfg.replace(prev_inst).trap(TrapCode::User(0));
+ ProgressStatus::Changed
+ };
+ (
+ func,
+ format!("Replace inst {} with trap", prev_inst),
+ status,
+ )
+ },
+ )
 }
}

-/// Try to remove an ebb.
-struct RemoveEbb {
- ebb: Ebb,
+/// Try to remove a block.
+struct RemoveBlock { + block: Block, } -impl RemoveEbb { +impl RemoveBlock { fn new(func: &Function) -> Self { Self { - ebb: func.layout.entry_block().unwrap(), + block: func.layout.entry_block().unwrap(), } } } -impl Mutator for RemoveEbb { +impl Mutator for RemoveBlock { fn name(&self) -> &'static str { - "remove ebb" + "remove block" } fn mutation_count(&self, func: &Function) -> usize { - ebb_count(func) + block_count(func) } fn mutate(&mut self, mut func: Function) -> Option<(Function, String, ProgressStatus)> { - func.layout.next_ebb(self.ebb).map(|next_ebb| { - self.ebb = next_ebb; - while let Some(inst) = func.layout.last_inst(self.ebb) { + func.layout.next_block(self.block).map(|next_block| { + self.block = next_block; + while let Some(inst) = func.layout.last_inst(self.block) { func.layout.remove_inst(inst); } - func.layout.remove_ebb(self.ebb); + func.layout.remove_block(self.block); ( func, - format!("Remove ebb {}", next_ebb), + format!("Remove block {}", next_block), ProgressStatus::ExpandedOrShrinked, ) }) @@ -333,8 +337,8 @@ impl Mutator for RemoveUnusedEntities { let name = match self.kind { 0 => { let mut ext_func_usage_map = HashMap::new(); - for ebb in func.layout.ebbs() { - for inst in func.layout.ebb_insts(ebb) { + for block in func.layout.blocks() { + for inst in func.layout.block_insts(block) { match func.dfg[inst] { // Add new cases when there are new instruction formats taking a `FuncRef`. InstructionData::Call { func_ref, .. } @@ -383,8 +387,8 @@ impl Mutator for RemoveUnusedEntities { } let mut signatures_usage_map = HashMap::new(); - for ebb in func.layout.ebbs() { - for inst in func.layout.ebb_insts(ebb) { + for block in func.layout.blocks() { + for inst in func.layout.block_insts(block) { // Add new cases when there are new instruction formats taking a `SigRef`. if let InstructionData::CallIndirect { sig_ref, .. } = func.dfg[inst] { signatures_usage_map @@ -431,8 +435,8 @@ impl Mutator for RemoveUnusedEntities { } 2 => { let mut stack_slot_usage_map = HashMap::new(); - for ebb in func.layout.ebbs() { - for inst in func.layout.ebb_insts(ebb) { + for block in func.layout.blocks() { + for inst in func.layout.block_insts(block) { match func.dfg[inst] { // Add new cases when there are new instruction formats taking a `StackSlot`. InstructionData::StackLoad { stack_slot, .. } @@ -490,8 +494,8 @@ impl Mutator for RemoveUnusedEntities { } 3 => { let mut global_value_usage_map = HashMap::new(); - for ebb in func.layout.ebbs() { - for inst in func.layout.ebb_insts(ebb) { + for block in func.layout.blocks() { + for inst in func.layout.block_insts(block) { // Add new cases when there are new instruction formats taking a `GlobalValue`. if let InstructionData::UnaryGlobalValue { global_value, .. } = func.dfg[inst] @@ -545,15 +549,15 @@ impl Mutator for RemoveUnusedEntities { } struct MergeBlocks { - ebb: Ebb, - prev_ebb: Option, + block: Block, + prev_block: Option, } impl MergeBlocks { fn new(func: &Function) -> Self { Self { - ebb: func.layout.entry_block().unwrap(), - prev_ebb: None, + block: func.layout.entry_block().unwrap(), + prev_block: None, } } } @@ -564,30 +568,30 @@ impl Mutator for MergeBlocks { } fn mutation_count(&self, func: &Function) -> usize { - // N ebbs may result in at most N-1 merges. - ebb_count(func) - 1 + // N blocks may result in at most N-1 merges. 
+ block_count(func) - 1
 }

 fn mutate(&mut self, mut func: Function) -> Option<(Function, String, ProgressStatus)> {
- let ebb = match func.layout.next_ebb(self.ebb) {
- Some(ebb) => ebb,
+ let block = match func.layout.next_block(self.block) {
+ Some(block) => block,
 None => return None,
 };

- self.ebb = ebb;
+ self.block = block;

 let mut cfg = ControlFlowGraph::new();
 cfg.compute(&func);

- if cfg.pred_iter(ebb).count() != 1 {
+ if cfg.pred_iter(block).count() != 1 {
 return Some((
 func,
- format!("did nothing for {}", ebb),
+ format!("did nothing for {}", block),
 ProgressStatus::Skip,
 ));
 }

- let pred = cfg.pred_iter(ebb).next().unwrap();
+ let pred = cfg.pred_iter(block).next().unwrap();

 // If the branch instruction that lead us to this block is preceded by another branch
 // instruction, then we have a conditional jump sequence that we should not break by
@@ -596,86 +600,90 @@ impl Mutator for MergeBlocks {
 if func.dfg[pred_pred_inst].opcode().is_branch() {
 return Some((
 func,
- format!("did nothing for {}", ebb),
+ format!("did nothing for {}", block),
 ProgressStatus::Skip,
 ));
 }
 }

- assert!(func.dfg.ebb_params(ebb).len() == func.dfg.inst_variable_args(pred.inst).len());
+ assert!(func.dfg.block_params(block).len() == func.dfg.inst_variable_args(pred.inst).len());

- // If there were any EBB parameters in ebb, then the last instruction in pred will
- // fill these parameters. Make the EBB params aliases of the terminator arguments.
- for (ebb_param, arg) in func
+ // If there were any block parameters in block, then the last instruction in pred will
+ // fill these parameters. Make the block params aliases of the terminator arguments.
+ for (block_param, arg) in func
 .dfg
- .detach_ebb_params(ebb)
+ .detach_block_params(block)
 .as_slice(&func.dfg.value_lists)
 .iter()
 .cloned()
 .zip(func.dfg.inst_variable_args(pred.inst).iter().cloned())
 .collect::<Vec<_>>()
 {
- if ebb_param != arg {
- func.dfg.change_to_alias(ebb_param, arg);
+ if block_param != arg {
+ func.dfg.change_to_alias(block_param, arg);
 }
 }

- // Remove the terminator branch to the current EBB.
+ // Remove the terminator branch to the current block.
 func.layout.remove_inst(pred.inst);

 // Move all the instructions to the predecessor.
- while let Some(inst) = func.layout.first_inst(ebb) {
+ while let Some(inst) = func.layout.first_inst(block) {
 func.layout.remove_inst(inst);
- func.layout.append_inst(inst, pred.ebb);
+ func.layout.append_inst(inst, pred.block);
 }

- // Remove the predecessor EBB.
- func.layout.remove_ebb(ebb);
+ // Remove the merged block.
+ func.layout.remove_block(block);

- // Record the previous EBB: if we caused a crash (as signaled by a call to did_crash), then
- // we'll start back to this EBB.
+ // Record the previous block: if we caused a crash (as signaled by a call to did_crash), then
+ // we'll restart from this block.
+ self.prev_block = Some(pred.block); Some(( func, - format!("merged {} and {}", pred.ebb, ebb), + format!("merged {} and {}", pred.block, block), ProgressStatus::ExpandedOrShrinked, )) } fn did_crash(&mut self) { - self.ebb = self.prev_ebb.unwrap(); + self.block = self.prev_block.unwrap(); } } -fn next_inst_ret_prev(func: &Function, ebb: &mut Ebb, inst: &mut Inst) -> Option<(Ebb, Inst)> { - let prev = (*ebb, *inst); +fn next_inst_ret_prev( + func: &Function, + block: &mut Block, + inst: &mut Inst, +) -> Option<(Block, Inst)> { + let prev = (*block, *inst); if let Some(next_inst) = func.layout.next_inst(*inst) { *inst = next_inst; return Some(prev); } - if let Some(next_ebb) = func.layout.next_ebb(*ebb) { - *ebb = next_ebb; - *inst = func.layout.first_inst(*ebb).expect("no inst"); + if let Some(next_block) = func.layout.next_block(*block) { + *block = next_block; + *inst = func.layout.first_inst(*block).expect("no inst"); return Some(prev); } None } -fn ebb_count(func: &Function) -> usize { - func.layout.ebbs().count() +fn block_count(func: &Function) -> usize { + func.layout.blocks().count() } fn inst_count(func: &Function) -> usize { func.layout - .ebbs() - .map(|ebb| func.layout.ebb_insts(ebb).count()) + .blocks() + .map(|block| func.layout.block_insts(block).count()) .sum() } fn resolve_aliases(func: &mut Function) { - for ebb in func.layout.ebbs() { - for inst in func.layout.ebb_insts(ebb) { + for block in func.layout.blocks() { + for inst in func.layout.block_insts(block) { func.dfg.resolve_aliases_in_arguments(inst); } } @@ -713,7 +721,7 @@ fn reduce( 0 => Box::new(RemoveInst::new(&func)), 1 => Box::new(ReplaceInstWithConst::new(&func)), 2 => Box::new(ReplaceInstWithTrap::new(&func)), - 3 => Box::new(RemoveEbb::new(&func)), + 3 => Box::new(RemoveBlock::new(&func)), 4 => Box::new(RemoveUnusedEntities::new()), 5 => Box::new(MergeBlocks::new(&func)), _ => break, @@ -775,10 +783,10 @@ fn reduce( } progress_bar.println(format!( - "After pass {}, remaining insts/ebbs: {}/{} ({})", + "After pass {}, remaining insts/blocks: {}/{} ({})", pass_idx, inst_count(&func), - ebb_count(&func), + block_count(&func), if should_keep_reducing { "will keep reducing" } else { @@ -861,7 +869,7 @@ impl<'a> CrashCheckContext<'a> { Ok(None) => {} // The verifier panicked. Compiling it will probably give the same panic. // We treat it as succeeding to make it possible to reduce for the actual error. - // FIXME prevent verifier panic on removing ebb0. + // FIXME prevent verifier panic on removing block0. Err(_) => return CheckResult::Succeed, } @@ -869,11 +877,13 @@ impl<'a> CrashCheckContext<'a> { { // For testing purposes we emulate a panic caused by the existence of // a `call` instruction. - let contains_call = func.layout.ebbs().any(|ebb| { - func.layout.ebb_insts(ebb).any(|inst| match func.dfg[inst] { - InstructionData::Call { .. } => true, - _ => false, - }) + let contains_call = func.layout.blocks().any(|block| { + func.layout + .block_insts(block) + .any(|inst| match func.dfg[inst] { + InstructionData::Call { .. 
} => true, + _ => false, + }) }); if contains_call { return CheckResult::Crash("test crash".to_string()); @@ -934,9 +944,9 @@ mod tests { assert_eq!(crash_msg, "test crash"); assert_eq!( - ebb_count(&func_reduced_twice), - ebb_count(&reduced_func), - "reduction wasn't maximal for ebbs" + block_count(&func_reduced_twice), + block_count(&reduced_func), + "reduction wasn't maximal for blocks" ); assert_eq!( inst_count(&func_reduced_twice), diff --git a/cranelift/src/disasm.rs b/cranelift/src/disasm.rs index 5e6d3bbe33..ba80e3d57f 100644 --- a/cranelift/src/disasm.rs +++ b/cranelift/src/disasm.rs @@ -18,14 +18,19 @@ impl PrintRelocs { } impl binemit::RelocSink for PrintRelocs { - fn reloc_ebb( + fn reloc_block( &mut self, where_: binemit::CodeOffset, r: binemit::Reloc, offset: binemit::CodeOffset, ) { if self.flag_print { - writeln!(&mut self.text, "reloc_ebb: {} {} at {}", r, offset, where_).unwrap(); + writeln!( + &mut self.text, + "reloc_block: {} {} at {}", + r, offset, where_ + ) + .unwrap(); } } diff --git a/cranelift/src/run.rs b/cranelift/src/run.rs index 7825b1852d..0f553c8702 100644 --- a/cranelift/src/run.rs +++ b/cranelift/src/run.rs @@ -119,7 +119,7 @@ mod test { let code = String::from( " function %test() -> b8 { - ebb0: + block0: nop v1 = bconst.b8 true return v1 diff --git a/cranelift/tests/bugpoint_test.clif b/cranelift/tests/bugpoint_test.clif index 772b36d58e..b2e9acc37e 100644 --- a/cranelift/tests/bugpoint_test.clif +++ b/cranelift/tests/bugpoint_test.clif @@ -288,7 +288,7 @@ function u0:0(i64, i64, i64) system_v { fn103 = u0:13 sig103 fn104 = u0:95 sig104 -ebb0(v0: i64, v1: i64, v2: i64): +block0(v0: i64, v1: i64, v2: i64): v113 -> v1 v124 -> v1 v136 -> v1 @@ -411,9 +411,9 @@ ebb0(v0: i64, v1: i64, v2: i64): v110 = stack_addr.i64 ss105 v111 = stack_addr.i64 ss106 v112 = stack_addr.i64 ss107 - jump ebb1 + jump block1 -ebb1: +block1: v114 = load.i64 v113 v115 = iconst.i64 0 v116 = icmp ugt v114, v115 @@ -422,15 +422,15 @@ ebb1: v119 = icmp_imm eq v118, 0 v120 = bint.i8 v119 v121 = uextend.i32 v120 - brz v121, ebb3 - jump ebb2 + brz v121, block3 + jump block2 -ebb2: +block2: v122 = global_value.i64 gv0 v123 = global_value.i64 gv1 trap user65535 -ebb3: +block3: v125 = iadd_imm.i64 v124, 8 v126 = load.i64 v125 v127 = iconst.i64 0 @@ -440,15 +440,15 @@ ebb3: v131 = icmp_imm eq v130, 0 v132 = bint.i8 v131 v133 = uextend.i32 v132 - brz v133, ebb5 - jump ebb4 + brz v133, block5 + jump block4 -ebb4: +block4: v134 = global_value.i64 gv2 v135 = global_value.i64 gv3 trap user65535 -ebb5: +block5: v137 = iadd_imm.i64 v136, 16 v138 = load.i64 v137+42 v139 = iconst.i64 0 @@ -458,71 +458,71 @@ ebb5: v143 = icmp_imm eq v142, 0 v144 = bint.i8 v143 v145 = uextend.i32 v144 - brz v145, ebb7 - jump ebb6 + brz v145, block7 + jump block6 -ebb6: +block6: v146 = global_value.i64 gv4 v147 = global_value.i64 gv5 trap user65535 -ebb7: +block7: v149 = load.i64 v148 v150 = iadd_imm.i64 v148, 16 v151 = load.i64 v150 call fn6(v7, v149, v151) - jump ebb8 + jump block8 -ebb8: +block8: v152 = call fn7(v7) - jump ebb9 + jump block9 -ebb9: +block9: v153 = load.i8 v6 v154 = uextend.i32 v153 v155 = icmp_imm eq v154, 0 v156 = bint.i8 v155 v157 = uextend.i32 v156 - brz v157, ebb11 - jump ebb10 + brz v157, block11 + jump block10 -ebb10: +block10: v158 = global_value.i64 gv6 v159 = global_value.i64 gv7 trap user65535 -ebb11: +block11: v161 = load.i64 v160 v162 = iadd_imm.i64 v160, 8 v163 = load.i64 v162 call fn10(v9, v161, v163) - jump ebb12 + jump block12 -ebb12: +block12: v164 = call fn11(v9) - jump ebb13 + jump 
block13 -ebb13: +block13: v165 = load.i8 v8 v166 = uextend.i32 v165 v167 = icmp_imm eq v166, 0 v168 = bint.i8 v167 v169 = uextend.i32 v168 - brz v169, ebb15 - jump ebb14 + brz v169, block15 + jump block14 -ebb14: +block14: v170 = global_value.i64 gv8 v171 = global_value.i64 gv9 trap user65535 -ebb15: +block15: v172 = load.i64 aligned v3 v173 = load.i64 aligned v3+8 v174 = call fn14(v11) - jump ebb16 + jump block16 -ebb16: +block16: v175 = iconst.i64 17 v176 = load.i64 v10 v177 = icmp uge v176, v175 @@ -531,15 +531,15 @@ ebb16: v180 = icmp_imm eq v179, 0 v181 = bint.i8 v180 v182 = uextend.i32 v181 - brz v182, ebb18 - jump ebb17 + brz v182, block18 + jump block17 -ebb17: +block17: v183 = global_value.i64 gv10 v184 = global_value.i64 gv11 trap user65535 -ebb18: +block18: v186 = load.i64 v185 v187 = iadd_imm.i64 v185, 16 v188 = load.i64 v187 @@ -552,14 +552,14 @@ ebb18: v195 = iadd_imm.i64 v12, 8 v196 = load.i8 v195 v197 = uextend.i32 v196 - brz v197, ebb19 - jump ebb164 + brz v197, block19 + jump block164 -ebb164: +block164: v198 = global_value.i64 gv12 trap user0 -ebb19: +block19: v199 = load.i64 v12 v213 -> v199 v200 = iconst.i64 1 @@ -573,14 +573,14 @@ ebb19: v208 = iadd_imm.i64 v13, 8 v209 = load.i8 v208 v210 = uextend.i32 v209 - brz v210, ebb20 - jump ebb163 + brz v210, block20 + jump block163 -ebb163: +block163: v211 = global_value.i64 gv13 trap user0 -ebb20: +block20: v212 = load.i64 v13 v214 = icmp.i64 ult v213, v212 v215 = bint.i8 v214 @@ -588,15 +588,15 @@ ebb20: v217 = icmp_imm eq v216, 0 v218 = bint.i8 v217 v219 = uextend.i32 v218 - brz v219, ebb22 - jump ebb21 + brz v219, block22 + jump block21 -ebb21: +block21: v220 = global_value.i64 gv14 v221 = global_value.i64 gv15 trap user65535 -ebb22: +block22: v223 = load.i64 v222 v224 = iadd_imm.i64 v222, 16 v225 = load.i64 v224 @@ -609,22 +609,22 @@ ebb22: v232 = iadd_imm.i64 v16, 8 v233 = load.i8 v232 v234 = uextend.i32 v233 - brz v234, ebb23 - jump ebb162 + brz v234, block23 + jump block162 -ebb162: +block162: v235 = global_value.i64 gv16 trap user0 -ebb23: +block23: v236 = load.i64 v16 v238 = iadd_imm.i64 v237, 24 v239 = load.i16 v238 v240 = iadd_imm.i64 v15, 8 call fn22(v14, v15) - jump ebb24 + jump block24 -ebb24: +block24: v242 = load.i64 v241 v243 = iadd_imm.i64 v241, 8 v244 = load.i64 v243 @@ -637,14 +637,14 @@ ebb24: v251 = iadd_imm.i64 v19, 8 v252 = load.i8 v251 v253 = uextend.i32 v252 - brz v253, ebb25 - jump ebb161 + brz v253, block25 + jump block161 -ebb161: +block161: v254 = global_value.i64 gv17 trap user0 -ebb25: +block25: v255 = load.i64 v19 v257 = iadd_imm.i64 v256, 24 v258 = load.i16 v257 @@ -652,9 +652,9 @@ ebb25: v260 = iadd_imm.i64 v14, 8 v261 = load.i16 v260 call fn24(v17, v18, v261) - jump ebb26 + jump block26 -ebb26: +block26: v263 = load.i64 v262 v264 = iadd_imm.i64 v262, 24 v265 = load.i16 v264 @@ -662,9 +662,9 @@ ebb26: v267 = iadd_imm.i64 v14, 8 v268 = load.i16 v267 call fn25(v20, v21, v268) - jump ebb27 + jump block27 -ebb27: +block27: v269 = iadd_imm.i64 v14, 8 v270 = load.i16 v269 v271 = iconst.i16 -60 @@ -676,14 +676,14 @@ ebb27: v277 = iadd_imm.i64 v24, 2 v278 = load.i8 v277 v279 = uextend.i32 v278 - brz v279, ebb28 - jump ebb160 + brz v279, block28 + jump block160 -ebb160: +block160: v280 = global_value.i64 gv18 trap user0 -ebb28: +block28: v281 = load.i16 v24 v282 = iconst.i16 64 v283 = isub v281, v282 @@ -694,14 +694,14 @@ ebb28: v288 = iadd_imm.i64 v25, 2 v289 = load.i8 v288 v290 = uextend.i32 v289 - brz v290, ebb29 - jump ebb159 + brz v290, block29 + jump block159 -ebb159: +block159: v291 = 
global_value.i64 gv19 trap user0 -ebb29: +block29: v292 = load.i16 v25 v317 -> v292 v293 = iadd_imm.i64 v14, 8 @@ -715,14 +715,14 @@ ebb29: v301 = iadd_imm.i64 v26, 2 v302 = load.i8 v301 v303 = uextend.i32 v302 - brz v303, ebb30 - jump ebb158 + brz v303, block30 + jump block158 -ebb158: +block158: v304 = global_value.i64 gv20 trap user0 -ebb30: +block30: v305 = load.i16 v26 v306 = iconst.i16 64 v307 = isub v305, v306 @@ -733,42 +733,42 @@ ebb30: v312 = iadd_imm.i64 v27, 2 v313 = load.i8 v312 v314 = uextend.i32 v313 - brz v314, ebb31 - jump ebb157 + brz v314, block31 + jump block157 -ebb157: +block157: v315 = global_value.i64 gv21 trap user0 -ebb31: +block31: v316 = load.i16 v27 call fn30(v23, v317, v316) - jump ebb32 + jump block32 -ebb32: +block32: v318 = load.i16 v23 v1007 -> v318 v319 = iadd_imm.i64 v23, 8 v320 = load.i64 aligned v319 v321 = load.i64 aligned v319+8 call fn31(v28, v14, v22) - jump ebb33 + jump block33 -ebb33: +block33: call fn32(v29, v17, v22) - jump ebb34 + jump block34 -ebb34: +block34: call fn33(v30, v20, v22) - jump ebb35 + jump block35 -ebb35: +block35: v322 = iconst.i8 1 v323 = uextend.i32 v322 - brz v323, ebb42 - jump ebb36 + brz v323, block42 + jump block36 -ebb36: +block36: v324 = iadd_imm.i64 v28, 8 v325 = iadd_imm.i64 v29, 8 v326 = iadd_imm.i64 v31, 8 @@ -785,10 +785,10 @@ ebb36: v335 = icmp_imm eq v334, 0 v336 = bint.i8 v335 v337 = uextend.i32 v336 - brz v337, ebb38 - jump ebb37 + brz v337, block38 + jump block37 -ebb37: +block37: v338 = global_value.i64 gv22 v339 = iconst.i64 3 v342 = iadd_imm.i64 v36, 8 @@ -798,17 +798,17 @@ ebb37: v347 -> v345 v346 = func_addr.i64 fn34 call fn35(v39, v343, v346) - jump ebb39 + jump block39 -ebb38: - jump ebb42 +block38: + jump block42 -ebb39: +block39: v348 = func_addr.i64 fn36 call fn37(v40, v347, v348) - jump ebb40 + jump block40 -ebb40: +block40: v349 = iconst.i64 0 v350 = imul_imm v349, 16 v351 = iadd.i64 v35, v350 @@ -821,21 +821,21 @@ ebb40: v358 = load.i64 aligned v40+8 v359 = iconst.i64 2 call fn38(v32, v33, v34) - jump ebb41 + jump block41 -ebb41: +block41: v360 = global_value.i64 gv23 call fn39(v32, v360) v361 = global_value.i64 gv24 trap user65535 -ebb42: +block42: v362 = iconst.i8 1 v363 = uextend.i32 v362 - brz v363, ebb49(v1007) - jump ebb43 + brz v363, block49(v1007) + jump block43 -ebb43: +block43: v364 = iadd_imm.i64 v28, 8 v365 = iadd_imm.i64 v30, 8 v366 = iadd_imm.i64 v41, 8 @@ -852,10 +852,10 @@ ebb43: v375 = icmp_imm eq v374, 0 v376 = bint.i8 v375 v377 = uextend.i32 v376 - brz v377, ebb45 - jump ebb44 + brz v377, block45 + jump block44 -ebb44: +block44: v378 = global_value.i64 gv25 v379 = iconst.i64 3 v382 = iadd_imm.i64 v46, 8 @@ -865,17 +865,17 @@ ebb44: v387 -> v385 v386 = func_addr.i64 fn41 call fn42(v49, v383, v386) - jump ebb46 + jump block46 -ebb45: - jump ebb49(v1007) +block45: + jump block49(v1007) -ebb46: +block46: v388 = func_addr.i64 fn43 call fn44(v50, v387, v388) - jump ebb47 + jump block47 -ebb47: +block47: v389 = iconst.i64 0 v390 = imul_imm v389, 16 v391 = iadd.i64 v45, v390 @@ -888,15 +888,15 @@ ebb47: v398 = load.i64 aligned v50+8 v399 = iconst.i64 2 call fn45(v42, v43, v44) - jump ebb48 + jump block48 -ebb48: +block48: v400 = global_value.i64 gv26 call fn46(v42, v400) v401 = global_value.i64 gv27 trap user65535 -ebb49(v1006: i16): +block49(v1006: i16): v486 -> v1006 v402 = load.i64 v28 v403 = iconst.i64 1 @@ -909,14 +909,14 @@ ebb49(v1006: i16): v410 = iadd_imm.i64 v51, 8 v411 = load.i8 v410 v412 = uextend.i32 v411 - brz v412, ebb50 - jump ebb156 + brz v412, block50 + jump block156 
-ebb156: +block156: v413 = global_value.i64 gv28 trap user0 -ebb50: +block50: v414 = load.i64 v51 v439 -> v414 v452 -> v414 @@ -933,14 +933,14 @@ ebb50: v423 = iadd_imm.i64 v52, 8 v424 = load.i8 v423 v425 = uextend.i32 v424 - brz v425, ebb51 - jump ebb155 + brz v425, block51 + jump block155 -ebb155: +block155: v426 = global_value.i64 gv29 trap user0 -ebb51: +block51: v427 = load.i64 v52 v509 -> v427 v428 = iadd_imm.i64 v28, 8 @@ -950,14 +950,14 @@ ebb51: v431 = icmp eq v429, v430 v432 = bint.i8 v431 v433 = uextend.i32 v432 - brz v433, ebb52 - jump ebb154 + brz v433, block52 + jump block154 -ebb154: +block154: v434 = global_value.i64 gv30 trap user0 -ebb52: +block52: v436 = iconst.i16 0 v437 = isub v436, v435 v438 = sextend.i64 v437 @@ -972,14 +972,14 @@ ebb52: v446 = iadd_imm.i64 v53, 8 v447 = load.i8 v446 v448 = uextend.i32 v447 - brz v448, ebb53 - jump ebb153 + brz v448, block53 + jump block153 -ebb153: +block153: v449 = global_value.i64 gv31 trap user0 -ebb53: +block53: v450 = load.i64 v53 v451 = ireduce.i32 v450 v480 -> v451 @@ -994,14 +994,14 @@ ebb53: v461 = iadd_imm.i64 v54, 8 v462 = load.i8 v461 v463 = uextend.i32 v462 - brz v463, ebb54 - jump ebb152 + brz v463, block54 + jump block152 -ebb152: +block152: v464 = global_value.i64 gv32 trap user0 -ebb54: +block54: v465 = load.i64 v54 v466 = iconst.i64 1 v467 = isub v465, v466 @@ -1013,20 +1013,20 @@ ebb54: v473 = iadd_imm.i64 v55, 8 v474 = load.i8 v473 v475 = uextend.i32 v474 - brz v475, ebb55 - jump ebb151 + brz v475, block55 + jump block151 -ebb151: +block151: v476 = global_value.i64 gv33 trap user0 -ebb55: +block55: v477 = load.i64 v55 v479 = band.i64 v478, v477 call fn54(v56, v480) - jump ebb56 + jump block56 -ebb56: +block56: v481 = load.i8 v56 v548 -> v481 v482 = iadd_imm.i64 v56, 4 @@ -1042,14 +1042,14 @@ ebb56: v492 = iadd_imm.i64 v57, 2 v493 = load.i8 v492 v494 = uextend.i32 v493 - brz v494, ebb57 - jump ebb150 + brz v494, block57 + jump block150 -ebb150: +block150: v495 = global_value.i64 gv34 trap user0 -ebb57: +block57: v496 = load.i16 v57 v497 = iconst.i16 1 v498 = iadd v496, v497 @@ -1060,14 +1060,14 @@ ebb57: v503 = iadd_imm.i64 v58, 2 v504 = load.i8 v503 v505 = uextend.i32 v504 - brz v505, ebb58 - jump ebb149 + brz v505, block58 + jump block149 -ebb149: +block149: v506 = global_value.i64 gv35 trap user0 -ebb58: +block58: v507 = load.i16 v58 v510 = isub.i64 v508, v509 v511 = iconst.i8 0 @@ -1078,14 +1078,14 @@ ebb58: v516 = iadd_imm.i64 v59, 8 v517 = load.i8 v516 v518 = uextend.i32 v517 - brz v518, ebb59 - jump ebb148 + brz v518, block59 + jump block148 -ebb148: +block148: v519 = global_value.i64 gv36 trap user0 -ebb59: +block59: v520 = load.i64 v59 v546 -> v520 v522 = iconst.i64 1 @@ -1098,14 +1098,14 @@ ebb59: v529 = iadd_imm.i64 v60, 8 v530 = load.i8 v529 v531 = uextend.i32 v530 - brz v531, ebb60 - jump ebb147 + brz v531, block60 + jump block147 -ebb147: +block147: v532 = global_value.i64 gv37 trap user0 -ebb60: +block60: v533 = load.i64 v60 v534 = iconst.i64 1 v535 = isub v533, v534 @@ -1117,20 +1117,20 @@ ebb60: v541 = iadd_imm.i64 v61, 8 v542 = load.i8 v541 v543 = uextend.i32 v542 - brz v543, ebb61 - jump ebb146 + brz v543, block61 + jump block146 -ebb146: +block146: v544 = global_value.i64 gv38 trap user0 -ebb61: +block61: v545 = load.i64 v61 v547 = band.i64 v546, v545 v549 = uextend.i16 v548 - jump ebb62(v551, v484, v521, v479, v520, v507, v508, v548, v547) + jump block62(v551, v484, v521, v479, v520, v507, v508, v548, v547) -ebb62(v552: i32, v1009: i64, v1013: i64, v1016: i64, v1019: i64, v1022: i16, v1025: 
i64, v1028: i8, v1033: i64): +block62(v552: i32, v1009: i64, v1013: i64, v1016: i64, v1019: i64, v1022: i16, v1025: i64, v1028: i8, v1033: i64): v559 -> v552 v562 -> v552 v569 -> v552 @@ -1173,14 +1173,14 @@ ebb62(v552: i32, v1009: i64, v1013: i64, v1016: i64, v1019: i64, v1022: i16, v10 v555 = icmp eq v553, v554 v556 = bint.i8 v555 v557 = uextend.i32 v556 - brz v557, ebb63 - jump ebb145 + brz v557, block63 + jump block145 -ebb145: +block145: v558 = global_value.i64 gv39 trap user0 -ebb63: +block63: v561 = udiv.i32 v559, v560 v574 -> v561 v563 = load.i32 v63 @@ -1189,24 +1189,24 @@ ebb63: v565 = icmp eq v563, v564 v566 = bint.i8 v565 v567 = uextend.i32 v566 - brz v567, ebb64 - jump ebb144 + brz v567, block64 + jump block144 -ebb144: +block144: v568 = global_value.i64 gv40 trap user0 -ebb64: +block64: v571 = urem.i32 v569, v570 v622 -> v571 v803 -> v571 v1011 -> v571 v572 = iconst.i8 1 v573 = uextend.i32 v572 - brz v573, ebb68(v561) - jump ebb65 + brz v573, block68(v561) + jump block65 -ebb65: +block65: v575 = iconst.i32 10 v576 = icmp.i32 ult v574, v575 v577 = bint.i8 v576 @@ -1214,18 +1214,18 @@ ebb65: v579 = icmp_imm eq v578, 0 v580 = bint.i8 v579 v581 = uextend.i32 v580 - brz v581, ebb67 - jump ebb66 + brz v581, block67 + jump block66 -ebb66: +block66: v582 = global_value.i64 gv41 v583 = global_value.i64 gv42 trap user65535 -ebb67: - jump ebb68(v574) +block67: + jump block68(v574) -ebb68(v584: i32): +block68(v584: i32): v585 = ireduce.i8 v584 v586 = iconst.i8 48 v587 = iadd v586, v585 @@ -1236,27 +1236,27 @@ ebb68(v584: i32): v592 = iadd_imm.i64 v64, 1 v593 = load.i8 v592 v594 = uextend.i32 v593 - brz v594, ebb69 - jump ebb143 + brz v594, block69 + jump block143 -ebb143: +block143: v595 = global_value.i64 gv43 trap user0 -ebb69: +block69: v597 = load.i64 v3 v598 = load.i64 v3+8 v599 = icmp.i64 ult v596, v598 v600 = bint.i8 v599 v601 = uextend.i32 v600 - brnz v601, ebb70 - jump ebb142 + brnz v601, block70 + jump block142 -ebb142: +block142: v602 = global_value.i64 gv44 trap user0 -ebb70: +block70: v603 = load.i64 v3 v604 = load.i64 v3+8 v606 = imul_imm.i64 v605, 1 @@ -1272,14 +1272,14 @@ ebb70: v617 = iadd_imm.i64 v65, 8 v618 = load.i8 v617 v619 = uextend.i32 v618 - brz v619, ebb71 - jump ebb141 + brz v619, block71 + jump block141 -ebb141: +block141: v620 = global_value.i64 gv45 trap user0 -ebb71: +block71: v621 = load.i64 v65 v668 -> v621 v695 -> v621 @@ -1295,14 +1295,14 @@ ebb71: v631 = iadd_imm.i64 v66, 8 v632 = load.i8 v631 v633 = uextend.i32 v632 - brz v633, ebb72 - jump ebb140 + brz v633, block72 + jump block140 -ebb140: +block140: v634 = global_value.i64 gv46 trap user0 -ebb72: +block72: v635 = load.i64 v66 v637 = iadd v635, v636 v638 = iconst.i8 0 @@ -1313,24 +1313,24 @@ ebb72: v643 = iadd_imm.i64 v67, 8 v644 = load.i8 v643 v645 = uextend.i32 v644 - brz v645, ebb73 - jump ebb139 + brz v645, block73 + jump block139 -ebb139: +block139: v646 = global_value.i64 gv47 trap user0 -ebb73: +block73: v647 = load.i64 v67 v675 -> v647 v692 -> v647 v649 = icmp ult v647, v648 v650 = bint.i8 v649 v651 = uextend.i32 v650 - brz v651, ebb80 - jump ebb74 + brz v651, block80 + jump block74 -ebb74: +block74: v652 = load.i32 v63 v653 = uextend.i64 v652 v655 = ishl v653, v654 @@ -1342,23 +1342,23 @@ ebb74: v661 = iadd_imm.i64 v68, 8 v662 = load.i8 v661 v663 = uextend.i32 v662 - brz v663, ebb75 - jump ebb138 + brz v663, block75 + jump block138 -ebb138: +block138: v664 = global_value.i64 gv48 trap user0 -ebb75: +block75: v665 = load.i64 v68 v690 -> v665 v666 = load.i64 aligned v3 v667 = load.i64 aligned 
v3+8 v669 = load.i64 v73 call fn70(v71, v72, v669) - jump ebb76 + jump block76 -ebb76: +block76: v670 = load.i64 aligned v71 v671 = load.i64 aligned v71+8 v672 = load.i64 aligned v70 @@ -1373,40 +1373,40 @@ ebb76: v685 = iadd_imm.i64 v74, 8 v686 = load.i8 v685 v687 = uextend.i32 v686 - brz v687, ebb77 - jump ebb137 + brz v687, block77 + jump block137 -ebb137: +block137: v688 = global_value.i64 gv49 trap user0 -ebb77: +block77: v689 = load.i64 v74 v694 = iconst.i64 1 call fn72(v0, v69, v691, v692, v693, v689, v690, v694) - jump ebb78 + jump block78 -ebb78: - jump ebb79 +block78: + jump block79 -ebb79: +block79: return -ebb80: +block80: v697 = uextend.i64 v696 v698 = icmp.i64 ugt v695, v697 v699 = bint.i8 v698 v700 = uextend.i32 v699 - brz v700, ebb96 - jump ebb81 + brz v700, block96 + jump block81 -ebb81: +block81: v701 = iconst.i8 1 v702 = uextend.i32 v701 - brz v702, ebb88 - jump ebb82 + brz v702, block88 + jump block82 -ebb82: +block82: v703 = global_value.i64 gv50 v704 = iadd_imm.i64 v75, 8 v705 = load.i64 v75 @@ -1422,10 +1422,10 @@ ebb82: v713 = icmp_imm eq v712, 0 v714 = bint.i8 v713 v715 = uextend.i32 v714 - brz v715, ebb84 - jump ebb83 + brz v715, block84 + jump block83 -ebb83: +block83: v716 = global_value.i64 gv51 v717 = iconst.i64 3 v720 = iadd_imm.i64 v80, 8 @@ -1435,17 +1435,17 @@ ebb83: v725 -> v723 v724 = func_addr.i64 fn73 call fn74(v83, v721, v724) - jump ebb85 + jump block85 -ebb84: - jump ebb88 +block84: + jump block88 -ebb85: +block85: v726 = func_addr.i64 fn75 call fn76(v84, v725, v726) - jump ebb86 + jump block86 -ebb86: +block86: v727 = iconst.i64 0 v728 = imul_imm v727, 16 v729 = iadd.i64 v79, v728 @@ -1458,21 +1458,21 @@ ebb86: v736 = load.i64 aligned v84+8 v737 = iconst.i64 2 call fn77(v76, v77, v78) - jump ebb87 + jump block87 -ebb87: +block87: v738 = global_value.i64 gv52 call fn78(v76, v738) v739 = global_value.i64 gv53 trap user65535 -ebb88: +block88: v740 = iconst.i8 1 v741 = uextend.i32 v740 - brz v741, ebb95(v1030, v1031, v1041, v1046, v1054, v1059) - jump ebb89 + brz v741, block95(v1030, v1031, v1041, v1046, v1054, v1059) + jump block89 -ebb89: +block89: v742 = global_value.i64 gv54 v743 = iadd_imm.i64 v85, 8 v744 = load.i64 v85 @@ -1488,10 +1488,10 @@ ebb89: v752 = icmp_imm eq v751, 0 v753 = bint.i8 v752 v754 = uextend.i32 v753 - brz v754, ebb91 - jump ebb90 + brz v754, block91 + jump block90 -ebb90: +block90: v755 = global_value.i64 gv55 v756 = iconst.i64 3 v759 = iadd_imm.i64 v90, 8 @@ -1501,17 +1501,17 @@ ebb90: v764 -> v762 v763 = func_addr.i64 fn80 call fn81(v93, v760, v763) - jump ebb92 + jump block92 -ebb91: - jump ebb95(v1030, v1031, v1041, v1046, v1054, v1059) +block91: + jump block95(v1030, v1031, v1041, v1046, v1054, v1059) -ebb92: +block92: v765 = func_addr.i64 fn82 call fn83(v94, v764, v765) - jump ebb93 + jump block93 -ebb93: +block93: v766 = iconst.i64 0 v767 = imul_imm v766, 16 v768 = iadd.i64 v89, v767 @@ -1524,19 +1524,19 @@ ebb93: v775 = load.i64 aligned v94+8 v776 = iconst.i64 2 call fn84(v86, v87, v88) - jump ebb94 + jump block94 -ebb94: +block94: v777 = global_value.i64 gv56 call fn85(v86, v777) v778 = global_value.i64 gv57 trap user65535 -ebb95(v779: i64, v780: i64, v1040: i64, v1045: i64, v1053: i16, v1058: i64): +block95(v779: i64, v780: i64, v1040: i64, v1045: i64, v1053: i16, v1058: i64): v781 = iconst.i64 1 - jump ebb99(v779, v780, v781, v1040, v1045, v1053, v1058) + jump block99(v779, v780, v781, v1040, v1045, v1053, v1058) -ebb96: +block96: v782 = iconst.i16 1 v783 = load.i16 v62 v784 = isub v783, v782 @@ -1547,34 +1547,34 @@ 
ebb96: v789 = iadd_imm.i64 v95, 2 v790 = load.i8 v789 v791 = uextend.i32 v790 - brz v791, ebb97 - jump ebb136 + brz v791, block97 + jump block136 -ebb136: +block136: v792 = global_value.i64 gv58 trap user0 -ebb97: +block97: v793 = load.i16 aligned v95 v794 = iconst.i32 10 v795 = iconst.i32 0 v796 = icmp eq v794, v795 v797 = bint.i8 v796 v798 = uextend.i32 v797 - brz v798, ebb98 - jump ebb135 + brz v798, block98 + jump block135 -ebb135: +block135: v799 = global_value.i64 gv59 trap user0 -ebb98: +block98: v800 = iconst.i32 10 v801 = load.i32 v63 v802 = udiv v801, v800 - jump ebb62(v803, v1010, v1014, v1017, v1020, v1023, v1026, v1029, v1034) + jump block62(v803, v1010, v1014, v1017, v1020, v1023, v1026, v1029, v1034) -ebb99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v1057: i64): +block99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v1057: i64): v817 -> v1035 v830 -> v1037 v844 -> v1039 @@ -1603,14 +1603,14 @@ ebb99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v10 v812 = iadd_imm.i64 v96, 8 v813 = load.i8 v812 v814 = uextend.i32 v813 - brz v814, ebb100 - jump ebb134 + brz v814, block100 + jump block134 -ebb134: +block134: v815 = global_value.i64 gv60 trap user0 -ebb100: +block100: v816 = load.i64 v96 v843 -> v816 v856 -> v816 @@ -1625,14 +1625,14 @@ ebb100: v825 = iadd_imm.i64 v97, 8 v826 = load.i8 v825 v827 = uextend.i32 v826 - brz v827, ebb101 - jump ebb133 + brz v827, block101 + jump block133 -ebb133: +block133: v828 = global_value.i64 gv61 trap user0 -ebb101: +block101: v829 = load.i64 v97 v935 -> v829 v962 -> v829 @@ -1649,14 +1649,14 @@ ebb101: v838 = iadd_imm.i64 v98, 8 v839 = load.i8 v838 v840 = uextend.i32 v839 - brz v840, ebb102 - jump ebb132 + brz v840, block102 + jump block132 -ebb132: +block132: v841 = global_value.i64 gv62 trap user0 -ebb102: +block102: v842 = load.i64 v98 v976 -> v842 v989 -> v842 @@ -1671,14 +1671,14 @@ ebb102: v851 = iadd_imm.i64 v99, 8 v852 = load.i8 v851 v853 = uextend.i32 v852 - brz v853, ebb103 - jump ebb131 + brz v853, block103 + jump block131 -ebb131: +block131: v854 = global_value.i64 gv63 trap user0 -ebb103: +block103: v855 = load.i64 v99 v886 -> v855 v858 = iconst.i64 1 @@ -1691,14 +1691,14 @@ ebb103: v865 = iadd_imm.i64 v100, 8 v866 = load.i8 v865 v867 = uextend.i32 v866 - brz v867, ebb104 - jump ebb130 + brz v867, block104 + jump block130 -ebb130: +block130: v868 = global_value.i64 gv64 trap user0 -ebb104: +block104: v869 = load.i64 v100 v870 = iconst.i64 1 v871 = isub v869, v870 @@ -1710,14 +1710,14 @@ ebb104: v877 = iadd_imm.i64 v101, 8 v878 = load.i8 v877 v879 = uextend.i32 v878 - brz v879, ebb105 - jump ebb129 + brz v879, block105 + jump block129 -ebb129: +block129: v880 = global_value.i64 gv65 trap user0 -ebb105: +block105: v881 = load.i64 v101 v883 = band.i64 v882, v881 v934 -> v883 @@ -1727,10 +1727,10 @@ ebb105: v1048 -> v883 v884 = iconst.i8 1 v885 = uextend.i32 v884 - brz v885, ebb109(v855) - jump ebb106 + brz v885, block109(v855) + jump block106 -ebb106: +block106: v887 = iconst.i64 10 v888 = icmp.i64 ult v886, v887 v889 = bint.i8 v888 @@ -1738,18 +1738,18 @@ ebb106: v891 = icmp_imm eq v890, 0 v892 = bint.i8 v891 v893 = uextend.i32 v892 - brz v893, ebb108 - jump ebb107 + brz v893, block108 + jump block107 -ebb107: +block107: v894 = global_value.i64 gv66 v895 = global_value.i64 gv67 trap user65535 -ebb108: - jump ebb109(v886) +block108: + jump block109(v886) -ebb109(v896: i64): +block109(v896: i64): v897 = ireduce.i8 v896 v898 = iconst.i8 48 v899 = iadd v898, 
v897 @@ -1760,27 +1760,27 @@ ebb109(v896: i64): v904 = iadd_imm.i64 v102, 1 v905 = load.i8 v904 v906 = uextend.i32 v905 - brz v906, ebb110 - jump ebb128 + brz v906, block110 + jump block128 -ebb128: +block128: v907 = global_value.i64 gv68 trap user0 -ebb110: +block110: v909 = load.i64 v3 v910 = load.i64 v3+8 v911 = icmp.i64 ult v908, v910 v912 = bint.i8 v911 v913 = uextend.i32 v912 - brnz v913, ebb111 - jump ebb127 + brnz v913, block111 + jump block127 -ebb127: +block127: v914 = global_value.i64 gv69 trap user0 -ebb111: +block111: v915 = load.i64 v3 v916 = load.i64 v3+8 v918 = imul_imm.i64 v917, 1 @@ -1796,24 +1796,24 @@ ebb111: v929 = iadd_imm.i64 v103, 8 v930 = load.i8 v929 v931 = uextend.i32 v930 - brz v931, ebb112 - jump ebb126 + brz v931, block112 + jump block126 -ebb126: +block126: v932 = global_value.i64 gv70 trap user0 -ebb112: +block112: v933 = load.i64 v103 v954 -> v933 v1047 -> v933 v936 = icmp.i64 ult v934, v935 v937 = bint.i8 v936 v938 = uextend.i32 v937 - brz v938, ebb119 - jump ebb113 + brz v938, block119 + jump block113 -ebb113: +block113: v940 = iconst.i64 1 v941 = ishl v940, v939 v942 = iconst.i8 0 @@ -1824,23 +1824,23 @@ ebb113: v947 = iadd_imm.i64 v104, 8 v948 = load.i8 v947 v949 = uextend.i32 v948 - brz v949, ebb114 - jump ebb125 + brz v949, block114 + jump block125 -ebb125: +block125: v950 = global_value.i64 gv71 trap user0 -ebb114: +block114: v951 = load.i64 v104 v988 -> v951 v952 = load.i64 aligned v3 v953 = load.i64 aligned v3+8 v955 = load.i64 v109 call fn101(v107, v108, v955) - jump ebb115 + jump block115 -ebb115: +block115: v956 = load.i64 aligned v107 v957 = load.i64 aligned v107+8 v958 = load.i64 aligned v106 @@ -1855,14 +1855,14 @@ ebb115: v971 = iadd_imm.i64 v110, 8 v972 = load.i8 v971 v973 = uextend.i32 v972 - brz v973, ebb116 - jump ebb123 + brz v973, block116 + jump block123 -ebb123: +block123: v974 = global_value.i64 gv72 trap user0 -ebb116: +block116: v975 = load.i64 v110 v977 = imul v975, v976 v978 = iconst.i8 0 @@ -1873,22 +1873,22 @@ ebb116: v983 = iadd_imm.i64 v111, 8 v984 = load.i8 v983 v985 = uextend.i32 v984 - brz v985, ebb117 - jump ebb122 + brz v985, block117 + jump block122 -ebb122: +block122: v986 = global_value.i64 gv73 trap user0 -ebb117: +block117: v987 = load.i64 v111 call fn104(v0, v105, v990, v991, v992, v987, v988, v989) - jump ebb118 + jump block118 -ebb118: - jump ebb79 +block118: + jump block79 -ebb119: +block119: v993 = iconst.i16 1 v994 = load.i16 v62 v995 = isub v994, v993 @@ -1899,14 +1899,14 @@ ebb119: v1000 = iadd_imm.i64 v112, 2 v1001 = load.i8 v1000 v1002 = uextend.i32 v1001 - brz v1002, ebb120 - jump ebb121 + brz v1002, block120 + jump block121 -ebb121: +block121: v1003 = global_value.i64 gv74 trap user0 -ebb120: +block120: v1004 = load.i16 aligned v112 - jump ebb99(v1005, v1036, v1038, v1042, v1047, v1055, v1060) + jump block99(v1005, v1036, v1038, v1042, v1047, v1055, v1060) } diff --git a/cranelift/tests/bugpoint_test_expected.clif b/cranelift/tests/bugpoint_test_expected.clif index 0382b856f1..b2ca38a064 100644 --- a/cranelift/tests/bugpoint_test_expected.clif +++ b/cranelift/tests/bugpoint_test_expected.clif @@ -2,7 +2,7 @@ function u0:0(i64, i64, i64) system_v { sig0 = (i64, i64, i16, i64, i64, i64, i64, i64) system_v fn0 = u0:95 sig0 -ebb0(v0: i64, v1: i64, v2: i64): +block0(v0: i64, v1: i64, v2: i64): v113 -> v1 v124 -> v1 v136 -> v1 @@ -17,7 +17,7 @@ ebb0(v0: i64, v1: i64, v2: i64): v105 = iconst.i64 0 trap user0 -ebb99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v1057: i64): 
+block99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v1057: i64): v817 -> v1035 v830 -> v1037 v844 -> v1039 @@ -38,7 +38,7 @@ ebb99(v804: i64, v1035: i64, v1037: i64, v1039: i64, v1044: i64, v1052: i16, v10 v1060 -> v1057 trap user0 -ebb101: +block101: v829 = iconst.i64 0 v935 -> v829 v962 -> v829 @@ -47,7 +47,7 @@ ebb101: v1049 -> v829 trap user0 -ebb102: +block102: v842 = iconst.i64 0 v976 -> v842 v989 -> v842 @@ -55,7 +55,7 @@ ebb102: v1061 -> v842 trap user0 -ebb105: +block105: v883 = iconst.i64 0 v934 -> v883 v961 -> v883 @@ -64,12 +64,12 @@ ebb105: v1048 -> v883 trap user0 -ebb114: +block114: v951 = iconst.i64 0 v988 -> v951 trap user0 -ebb117: +block117: v987 = iconst.i64 0 call fn0(v0, v105, v1052, v883, v829, v987, v951, v842) trap user0 diff --git a/cranelift/umbrella/src/lib.rs b/cranelift/umbrella/src/lib.rs index f97ed443ce..46582c9555 100644 --- a/cranelift/umbrella/src/lib.rs +++ b/cranelift/umbrella/src/lib.rs @@ -37,7 +37,7 @@ pub mod prelude { pub use crate::codegen::ir::immediates::{Ieee32, Ieee64, Imm64, Uimm64}; pub use crate::codegen::ir::types; pub use crate::codegen::ir::{ - AbiParam, Ebb, ExtFuncData, ExternalName, GlobalValueData, InstBuilder, JumpTableData, + AbiParam, Block, ExtFuncData, ExternalName, GlobalValueData, InstBuilder, JumpTableData, MemFlags, Signature, StackSlotData, StackSlotKind, TrapCode, Type, Value, }; pub use crate::codegen::isa; diff --git a/cranelift/wasm/src/code_translator.rs b/cranelift/wasm/src/code_translator.rs index a767f093db..58b6eb1771 100644 --- a/cranelift/wasm/src/code_translator.rs +++ b/cranelift/wasm/src/code_translator.rs @@ -26,7 +26,7 @@ use super::{hash_map, HashMap}; use crate::environ::{FuncEnvironment, GlobalVariable, ReturnMode, WasmResult}; use crate::state::{ControlStackFrame, ElseData, FuncTranslationState, ModuleTranslationState}; use crate::translation_utils::{ - blocktype_params_results, ebb_with_params, f32_translation, f64_translation, + block_with_params, blocktype_params_results, f32_translation, f64_translation, }; use crate::translation_utils::{FuncIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex}; use crate::wasm_unsupported; @@ -147,32 +147,34 @@ pub fn translate_operator( state.reachable = false; } /***************************** Control flow blocks ********************************** - * When starting a control flow block, we create a new `Ebb` that will hold the code + * When starting a control flow block, we create a new `Block` that will hold the code * after the block, and we push a frame on the control stack. Depending on the type - * of block, we create a new `Ebb` for the body of the block with an associated + * of block, we create a new `Block` for the body of the block with an associated * jump instruction. * * The `End` instruction pops the last control frame from the control stack, seals * the destination block (since `br` instructions targeting it only appear inside the * block and have already been translated) and modify the value stack to use the - * possible `Ebb`'s arguments values. + * possible `Block`'s arguments values. 
***********************************************************************************/
Operator::Block { ty } => {
let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
- let next = ebb_with_params(builder, results, environ)?;
+ let next = block_with_params(builder, results, environ)?;
state.push_block(next, params.len(), results.len());
}
Operator::Loop { ty } => {
let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
- let loop_body = ebb_with_params(builder, params, environ)?;
- let next = ebb_with_params(builder, results, environ)?;
+ let loop_body = block_with_params(builder, params, environ)?;
+ let next = block_with_params(builder, results, environ)?;
builder.ins().jump(loop_body, state.peekn(params.len()));
state.push_loop(loop_body, next, params.len(), results.len());
- // Pop the initial `Ebb` actuals and replace them with the `Ebb`'s
+ // Pop the initial `Block` actuals and replace them with the `Block`'s
// params since control flow joins at the top of the loop.
state.popn(params.len());
- state.stack.extend_from_slice(builder.ebb_params(loop_body));
+ state
+ .stack
+ .extend_from_slice(builder.block_params(loop_body));
builder.switch_to_block(loop_body);
environ.translate_loop_header(builder.cursor())?;
@@ -183,12 +185,12 @@ pub fn translate_operator(
let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
let (destination, else_data) = if params == results {
// It is possible there is no `else` block, so we will only
- // allocate an ebb for it if/when we find the `else`. For now,
+ // allocate a block for it if/when we find the `else`. For now,
// we if the condition isn't true, then we jump directly to the
- // destination ebb following the whole `if...end`. If we do end
- // up discovering an `else`, then we will allocate an ebb for it
+ // destination block following the whole `if...end`. If we do end
+ // up discovering an `else`, then we will allocate a block for it
// and go back and patch the jump.
- let destination = ebb_with_params(builder, results, environ)?;
+ let destination = block_with_params(builder, results, environ)?;
let branch_inst = builder
.ins()
.brz(val, destination, state.peekn(params.len()));
@@ -196,8 +198,8 @@ pub fn translate_operator(
} else {
// The `if` type signature is not valid without an `else` block,
// so we eagerly allocate the `else` block here.
- let destination = ebb_with_params(builder, results, environ)?;
- let else_block = ebb_with_params(builder, params, environ)?;
+ let destination = block_with_params(builder, results, environ)?;
+ let else_block = block_with_params(builder, params, environ)?;
builder
.ins()
.brz(val, else_block, state.peekn(params.len()));
@@ -205,12 +207,12 @@ pub fn translate_operator(
(destination, ElseData::WithElse { else_block })
};
- let next_ebb = builder.create_ebb();
- builder.ins().jump(next_ebb, &[]);
- builder.seal_block(next_ebb); // Only predecessor is the current block.
- builder.switch_to_block(next_ebb);
+ let next_block = builder.create_block();
+ builder.ins().jump(next_block, &[]);
+ builder.seal_block(next_block); // Only predecessor is the current block.
+ builder.switch_to_block(next_block);

- // Here we append an argument to an Ebb targeted by an argumentless jump instruction
+ // Here we append an argument to a Block targeted by an argumentless jump instruction
// But in fact there are two cases:
// - either the If does not have a Else clause, in that case ty = EmptyBlock
// and we add nothing;
@@ -239,20 +241,20 @@ pub fn translate_operator(
// We have a branch from the head of the `if` to the `else`.
state.reachable = true;

- // Ensure we have an ebb for the `else` block (it may have
+ // Ensure we have a block for the `else` block (it may have
// already been pre-allocated, see `ElseData` for details).
- let else_ebb = match *else_data {
+ let else_block = match *else_data {
ElseData::NoElse { branch_inst } => {
let (params, _results) =
blocktype_params_results(module_translation_state, blocktype)?;
debug_assert_eq!(params.len(), num_return_values);
- let else_ebb = ebb_with_params(builder, params, environ)?;
+ let else_block = block_with_params(builder, params, environ)?;
builder.ins().jump(destination, state.peekn(params.len()));
state.popn(params.len());

- builder.change_jump_destination(branch_inst, else_ebb);
- builder.seal_block(else_ebb);
- else_ebb
+ builder.change_jump_destination(branch_inst, else_block);
+ builder.seal_block(else_block);
+ else_block
}
ElseData::WithElse { else_block } => {
builder
@@ -273,7 +275,7 @@ pub fn translate_operator(
// `if` so that we wouldn't have to save the parameters in the
// `ControlStackFrame` as another `Vec` allocation.

- builder.switch_to_block(else_ebb);
+ builder.switch_to_block(else_block);

// We don't bother updating the control frame's `ElseData`
// to `WithElse` because nothing else will read it.
@@ -284,13 +286,13 @@ pub fn translate_operator(
}
Operator::End => {
let frame = state.control_stack.pop().unwrap();
- let next_ebb = frame.following_code();
+ let next_block = frame.following_code();
if !builder.is_unreachable() || !builder.is_pristine() {
let return_count = frame.num_return_values();
let return_args = state.peekn_mut(return_count);
- let next_ebb_types = builder.func.dfg.ebb_param_types(next_ebb);
- bitcast_arguments(return_args, &next_ebb_types, builder);
+ let next_block_types = builder.func.dfg.block_param_types(next_block);
+ bitcast_arguments(return_args, &next_block_types, builder);
builder.ins().jump(frame.following_code(), return_args);
// You might expect that if we just finished an `if` block that
// didn't have a corresponding `else` block, then we would clean
// since we truncate the stack back to the original height
// below.
}
- builder.switch_to_block(next_ebb);
- builder.seal_block(next_ebb);
+ builder.switch_to_block(next_block);
+ builder.seal_block(next_block);

// If it is a loop we also have to seal the body loop block
if let ControlStackFrame::Loop { header, .. } = frame {
builder.seal_block(header)
}
state.stack.truncate(frame.original_stack_size());
- state.stack.extend_from_slice(builder.ebb_params(next_ebb));
+ state
+ .stack
+ .extend_from_slice(builder.block_params(next_block));
}
/**************************** Branch instructions *********************************
* The branch instructions all have as arguments a target nesting level, which
* corresponds to how many control stack frames do we have to pop to get the
- * destination `Ebb`.
+ * destination `Block`.
* - * Once the destination `Ebb` is found, we sometimes have to declare a certain depth + * Once the destination `Block` is found, we sometimes have to declare a certain depth * of the stack unreachable, because some branch instructions are terminator. * * The `br_table` case is much more complicated because Cranelift's `br_table` instruction * does not support jump arguments like all the other branch instructions. That is why, in * the case where we would use jump arguments for every other branch instructions, we - * need to split the critical edges leaving the `br_tables` by creating one `Ebb` per - * table destination; the `br_table` will point to these newly created `Ebbs` and these - * `Ebb`s contain only a jump instruction pointing to the final destination, this time with + * need to split the critical edges leaving the `br_tables` by creating one `Block` per + * table destination; the `br_table` will point to these newly created `Blocks` and these + * `Block`s contain only a jump instruction pointing to the final destination, this time with * jump arguments. * * This system is also implemented in Cranelift's SSA construction algorithm, because - * `use_var` located in a destination `Ebb` of a `br_table` might trigger the addition + * `use_var` located in a destination `Block` of a `br_table` might trigger the addition * of jump arguments in each predecessor branch instruction, one of which might be a * `br_table`. ***********************************************************************************/ @@ -345,7 +349,7 @@ pub fn translate_operator( // Bitcast any vector arguments to their default type, I8X16, before jumping. let destination_args = state.peekn_mut(return_count); - let destination_types = builder.func.dfg.ebb_param_types(br_destination); + let destination_types = builder.func.dfg.block_param_types(br_destination); bitcast_arguments( destination_args, &destination_types[..return_count], @@ -379,53 +383,53 @@ pub fn translate_operator( if jump_args_count == 0 { // No jump arguments for depth in &*depths { - let ebb = { + let block = { let i = state.control_stack.len() - 1 - (*depth as usize); let frame = &mut state.control_stack[i]; frame.set_branched_to_exit(); frame.br_destination() }; - data.push_entry(ebb); + data.push_entry(block); } let jt = builder.create_jump_table(data); - let ebb = { + let block = { let i = state.control_stack.len() - 1 - (default as usize); let frame = &mut state.control_stack[i]; frame.set_branched_to_exit(); frame.br_destination() }; - builder.ins().br_table(val, ebb, jt); + builder.ins().br_table(val, block, jt); } else { // Here we have jump arguments, but Cranelift's br_table doesn't support them // We then proceed to split the edges going out of the br_table let return_count = jump_args_count; - let mut dest_ebb_sequence = vec![]; - let mut dest_ebb_map = HashMap::new(); + let mut dest_block_sequence = vec![]; + let mut dest_block_map = HashMap::new(); for depth in &*depths { - let branch_ebb = match dest_ebb_map.entry(*depth as usize) { + let branch_block = match dest_block_map.entry(*depth as usize) { hash_map::Entry::Occupied(entry) => *entry.get(), hash_map::Entry::Vacant(entry) => { - let ebb = builder.create_ebb(); - dest_ebb_sequence.push((*depth as usize, ebb)); - *entry.insert(ebb) + let block = builder.create_block(); + dest_block_sequence.push((*depth as usize, block)); + *entry.insert(block) } }; - data.push_entry(branch_ebb); + data.push_entry(branch_block); } - let default_branch_ebb = match dest_ebb_map.entry(default as usize) 
{ + let default_branch_block = match dest_block_map.entry(default as usize) { hash_map::Entry::Occupied(entry) => *entry.get(), hash_map::Entry::Vacant(entry) => { - let ebb = builder.create_ebb(); - dest_ebb_sequence.push((default as usize, ebb)); - *entry.insert(ebb) + let block = builder.create_block(); + dest_block_sequence.push((default as usize, block)); + *entry.insert(block) } }; let jt = builder.create_jump_table(data); - builder.ins().br_table(val, default_branch_ebb, jt); - for (depth, dest_ebb) in dest_ebb_sequence { - builder.switch_to_block(dest_ebb); - builder.seal_block(dest_ebb); - let real_dest_ebb = { + builder.ins().br_table(val, default_branch_block, jt); + for (depth, dest_block) in dest_block_sequence { + builder.switch_to_block(dest_block); + builder.seal_block(dest_block); + let real_dest_block = { let i = state.control_stack.len() - 1 - depth; let frame = &mut state.control_stack[i]; frame.set_branched_to_exit(); @@ -434,14 +438,14 @@ pub fn translate_operator( // Bitcast any vector arguments to their default type, I8X16, before jumping. let destination_args = state.peekn_mut(return_count); - let destination_types = builder.func.dfg.ebb_param_types(real_dest_ebb); + let destination_types = builder.func.dfg.block_param_types(real_dest_block); bitcast_arguments( destination_args, &destination_types[..return_count], builder, ); - builder.ins().jump(real_dest_ebb, destination_args); + builder.ins().jump(real_dest_block, destination_args); } state.popn(return_count); } @@ -1498,7 +1502,7 @@ fn translate_unreachable_operator( // Push a placeholder control stack entry. The if isn't reachable, // so we don't have any branches anywhere. state.push_if( - ir::Ebb::reserved_value(), + ir::Block::reserved_value(), ElseData::NoElse { branch_inst: ir::Inst::reserved_value(), }, @@ -1508,7 +1512,7 @@ fn translate_unreachable_operator( ); } Operator::Loop { ty: _ } | Operator::Block { ty: _ } => { - state.push_block(ir::Ebb::reserved_value(), 0, 0); + state.push_block(ir::Block::reserved_value(), 0, 0); } Operator::Else => { let i = state.control_stack.len() - 1; @@ -1527,21 +1531,21 @@ fn translate_unreachable_operator( // We have a branch from the head of the `if` to the `else`. state.reachable = true; - let else_ebb = match *else_data { + let else_block = match *else_data { ElseData::NoElse { branch_inst } => { let (params, _results) = blocktype_params_results(module_translation_state, blocktype)?; - let else_ebb = ebb_with_params(builder, params, environ)?; + let else_block = block_with_params(builder, params, environ)?; // We change the target of the branch instruction. - builder.change_jump_destination(branch_inst, else_ebb); - builder.seal_block(else_ebb); - else_ebb + builder.change_jump_destination(branch_inst, else_block); + builder.seal_block(else_block); + else_block } ElseData::WithElse { else_block } => else_block, }; - builder.switch_to_block(else_ebb); + builder.switch_to_block(else_block); // Again, no need to push the parameters for the `else`, // since we already did when we saw the original `if`. 
See @@ -1596,7 +1600,7 @@ fn translate_unreachable_operator( // And add the return values of the block but only if the next block is reachable // (which corresponds to testing if the stack depth is 1) - stack.extend_from_slice(builder.ebb_params(frame.following_code())); + stack.extend_from_slice(builder.block_params(frame.following_code())); state.reachable = true; } } @@ -1736,21 +1740,21 @@ fn translate_br_if( let (br_destination, inputs) = translate_br_if_args(relative_depth, state); // Bitcast any vector arguments to their default type, I8X16, before jumping. - let destination_types = builder.func.dfg.ebb_param_types(br_destination); + let destination_types = builder.func.dfg.block_param_types(br_destination); bitcast_arguments(inputs, &destination_types[..inputs.len()], builder); builder.ins().brnz(val, br_destination, inputs); - let next_ebb = builder.create_ebb(); - builder.ins().jump(next_ebb, &[]); - builder.seal_block(next_ebb); // The only predecessor is the current block. - builder.switch_to_block(next_ebb); + let next_block = builder.create_block(); + builder.ins().jump(next_block, &[]); + builder.seal_block(next_block); // The only predecessor is the current block. + builder.switch_to_block(next_block); } fn translate_br_if_args( relative_depth: u32, state: &mut FuncTranslationState, -) -> (ir::Ebb, &mut [ir::Value]) { +) -> (ir::Block, &mut [ir::Value]) { let i = state.control_stack.len() - 1 - (relative_depth as usize); let (return_count, br_destination) = { let frame = &mut state.control_stack[i]; @@ -1973,7 +1977,7 @@ fn pop2_with_bitcast( /// A helper for bitcasting a sequence of values (e.g. function arguments). If a value is a /// vector type that does not match its expected type, this will modify the value in place to point /// to the result of a `raw_bitcast`. This conversion is necessary to translate Wasm code that -/// uses `V128` as function parameters (or implicitly in EBB parameters) and still use specific +/// uses `V128` as function parameters (or implicitly in block parameters) and still use specific /// CLIF types (e.g. `I32X4`) in the function body. pub fn bitcast_arguments( arguments: &mut [Value], diff --git a/cranelift/wasm/src/func_translator.rs b/cranelift/wasm/src/func_translator.rs index cc61b28c9b..27431be148 100644 --- a/cranelift/wasm/src/func_translator.rs +++ b/cranelift/wasm/src/func_translator.rs @@ -10,7 +10,7 @@ use crate::state::{FuncTranslationState, ModuleTranslationState}; use crate::translation_utils::get_vmctx_value_label; use crate::wasm_unsupported; use cranelift_codegen::entity::EntityRef; -use cranelift_codegen::ir::{self, Ebb, InstBuilder, ValueLabel}; +use cranelift_codegen::ir::{self, Block, InstBuilder, ValueLabel}; use cranelift_codegen::timing; use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable}; use log::info; @@ -84,27 +84,27 @@ impl FuncTranslator { func.name, func.signature ); - debug_assert_eq!(func.dfg.num_ebbs(), 0, "Function must be empty"); + debug_assert_eq!(func.dfg.num_blocks(), 0, "Function must be empty"); debug_assert_eq!(func.dfg.num_insts(), 0, "Function must be empty"); // This clears the `FunctionBuilderContext`. 
let mut builder = FunctionBuilder::new(func, &mut self.func_ctx); builder.set_srcloc(cur_srcloc(&reader)); - let entry_block = builder.create_ebb(); - builder.append_ebb_params_for_function_params(entry_block); + let entry_block = builder.create_block(); + builder.append_block_params_for_function_params(entry_block); builder.switch_to_block(entry_block); // This also creates values for the arguments. builder.seal_block(entry_block); // Declare all predecessors known. // Make sure the entry block is inserted in the layout before we make any callbacks to // `environ`. The callback functions may need to insert things in the entry block. - builder.ensure_inserted_ebb(); + builder.ensure_inserted_block(); let num_params = declare_wasm_parameters(&mut builder, entry_block, environ); // Set up the translation state with a single pushed control block representing the whole // function and its return values. - let exit_block = builder.create_ebb(); - builder.append_ebb_params_for_function_returns(exit_block); + let exit_block = builder.create_block(); + builder.append_block_params_for_function_returns(exit_block); self.state.initialize(&builder.func.signature, exit_block); parse_local_decls(&mut reader, &mut builder, num_params, environ)?; @@ -126,7 +126,7 @@ impl FuncTranslator { /// Return the number of local variables declared. fn declare_wasm_parameters( builder: &mut FunctionBuilder, - entry_block: Ebb, + entry_block: Block, environ: &FE, ) -> usize { let sig_len = builder.func.signature.params.len(); @@ -141,11 +141,11 @@ fn declare_wasm_parameters( builder.declare_var(local, param_type.value_type); next_local += 1; - let param_value = builder.ebb_params(entry_block)[i]; + let param_value = builder.block_params(entry_block)[i]; builder.def_var(local, param_value); } if param_type.purpose == ir::ArgumentPurpose::VMContext { - let param_value = builder.ebb_params(entry_block)[i]; + let param_value = builder.block_params(entry_block)[i]; builder.set_val_label(param_value, get_vmctx_value_label()); } } diff --git a/cranelift/wasm/src/state/func_state.rs b/cranelift/wasm/src/state/func_state.rs index 0ea0e13ea9..81a0d35e97 100644 --- a/cranelift/wasm/src/state/func_state.rs +++ b/cranelift/wasm/src/state/func_state.rs @@ -9,7 +9,7 @@ use crate::environ::{FuncEnvironment, GlobalVariable, WasmResult}; use crate::translation_utils::{FuncIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex}; use crate::{HashMap, Occupied, Vacant}; -use cranelift_codegen::ir::{self, Ebb, Inst, Value}; +use cranelift_codegen::ir::{self, Block, Inst, Value}; use std::vec::Vec; /// Information about the presence of an associated `else` for an `if`, or the @@ -35,24 +35,24 @@ pub enum ElseData { /// these cases, we pre-allocate the `else` block. WithElse { /// This is the `else` block. - else_block: Ebb, + else_block: Block, }, } /// A control stack frame can be an `if`, a `block` or a `loop`, each one having the following /// fields: /// -/// - `destination`: reference to the `Ebb` that will hold the code after the control block; +/// - `destination`: reference to the `Block` that will hold the code after the control block; /// - `num_return_values`: number of values returned by the control block; /// - `original_stack_size`: size of the value stack at the beginning of the control block. /// /// Moreover, the `if` frame has the `branch_inst` field that points to the `brz` instruction /// separating the `true` and `false` branch. 
The `loop` frame has a `header` field that references -/// the `Ebb` that contains the beginning of the body of the loop. +/// the `Block` that contains the beginning of the body of the loop. #[derive(Debug)] pub enum ControlStackFrame { If { - destination: Ebb, + destination: Block, else_data: ElseData, num_param_values: usize, num_return_values: usize, @@ -72,15 +72,15 @@ pub enum ControlStackFrame { // `state.reachable` when we hit the `end` in the `if .. else .. end`. }, Block { - destination: Ebb, + destination: Block, num_param_values: usize, num_return_values: usize, original_stack_size: usize, exit_is_branched_to: bool, }, Loop { - destination: Ebb, - header: Ebb, + destination: Block, + header: Block, num_param_values: usize, num_return_values: usize, original_stack_size: usize, @@ -115,14 +115,14 @@ impl ControlStackFrame { } => num_param_values, } } - pub fn following_code(&self) -> Ebb { + pub fn following_code(&self) -> Block { match *self { Self::If { destination, .. } | Self::Block { destination, .. } | Self::Loop { destination, .. } => destination, } } - pub fn br_destination(&self) -> Ebb { + pub fn br_destination(&self) -> Block { match *self { Self::If { destination, .. } | Self::Block { destination, .. } => destination, Self::Loop { header, .. } => header, @@ -254,7 +254,7 @@ impl FuncTranslationState { /// /// This resets the state to containing only a single block representing the whole function. /// The exit block is the last block in the function which will contain the return instruction. - pub(crate) fn initialize(&mut self, sig: &ir::Signature, exit_block: Ebb) { + pub(crate) fn initialize(&mut self, sig: &ir::Signature, exit_block: Block) { self.clear(); self.push_block( exit_block, @@ -343,7 +343,7 @@ impl FuncTranslationState { /// Push a block on the control stack. pub(crate) fn push_block( &mut self, - following_code: Ebb, + following_code: Block, num_param_types: usize, num_result_types: usize, ) { @@ -360,8 +360,8 @@ impl FuncTranslationState { /// Push a loop on the control stack. pub(crate) fn push_loop( &mut self, - header: Ebb, - following_code: Ebb, + header: Block, + following_code: Block, num_param_types: usize, num_result_types: usize, ) { @@ -378,7 +378,7 @@ impl FuncTranslationState { /// Push an if on the control stack. pub(crate) fn push_if( &mut self, - destination: Ebb, + destination: Block, else_data: ElseData, num_param_types: usize, num_result_types: usize, diff --git a/cranelift/wasm/src/translation_utils.rs b/cranelift/wasm/src/translation_utils.rs index d431c13f29..796cc5e49b 100644 --- a/cranelift/wasm/src/translation_utils.rs +++ b/cranelift/wasm/src/translation_utils.rs @@ -185,42 +185,42 @@ pub fn blocktype_params_results( }) } -/// Create an `Ebb` with the given Wasm parameters. -pub fn ebb_with_params( +/// Create a `Block` with the given Wasm parameters. 
+pub fn block_with_params( builder: &mut FunctionBuilder, params: &[wasmparser::Type], environ: &PE, -) -> WasmResult { - let ebb = builder.create_ebb(); +) -> WasmResult { + let block = builder.create_block(); for ty in params.iter() { match ty { wasmparser::Type::I32 => { - builder.append_ebb_param(ebb, ir::types::I32); + builder.append_block_param(block, ir::types::I32); } wasmparser::Type::I64 => { - builder.append_ebb_param(ebb, ir::types::I64); + builder.append_block_param(block, ir::types::I64); } wasmparser::Type::F32 => { - builder.append_ebb_param(ebb, ir::types::F32); + builder.append_block_param(block, ir::types::F32); } wasmparser::Type::F64 => { - builder.append_ebb_param(ebb, ir::types::F64); + builder.append_block_param(block, ir::types::F64); } wasmparser::Type::AnyRef | wasmparser::Type::AnyFunc | wasmparser::Type::NullRef => { - builder.append_ebb_param(ebb, environ.reference_type()); + builder.append_block_param(block, environ.reference_type()); } wasmparser::Type::V128 => { - builder.append_ebb_param(ebb, ir::types::I8X16); + builder.append_block_param(block, ir::types::I8X16); } ty => { return Err(wasm_unsupported!( - "ebb_with_params: type {:?} in multi-value block's signature", + "block_with_params: type {:?} in multi-value block's signature", ty )) } } } - Ok(ebb) + Ok(block) } /// Turns a `wasmparser` `f32` into a `Cranelift` one.
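
For reference, the renamed builder surface seen throughout this patch (`create_block`, `append_block_params_for_function_params`, `block_params`, `switch_to_block`, `seal_block`) composes as in the sketch below. This is not part of the patch: it is a minimal, hand-written example assuming the cranelift-frontend API of this era, and the function name `sample` is purely illustrative.

    // Minimal sketch of the post-rename FunctionBuilder API, assuming the
    // cranelift-codegen / cranelift-frontend crates as used in this patch.
    use cranelift_codegen::ir::{types, AbiParam, Function, InstBuilder};
    use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};

    fn sample() -> Function {
        // An i32 -> i32 identity function.
        let mut func = Function::new();
        func.signature.params.push(AbiParam::new(types::I32));
        func.signature.returns.push(AbiParam::new(types::I32));

        let mut ctx = FunctionBuilderContext::new();
        let mut builder = FunctionBuilder::new(&mut func, &mut ctx);

        // `create_block` replaces `create_ebb`; the entry block takes the
        // function parameters as block parameters.
        let entry = builder.create_block();
        builder.append_block_params_for_function_params(entry);
        builder.switch_to_block(entry);
        builder.seal_block(entry); // all predecessors of the entry are known

        // `block_params` replaces `ebb_params`.
        let arg = builder.block_params(entry)[0];
        builder.ins().return_(&[arg]);

        builder.finalize();
        func
    }

As in `func_translator.rs` above, the entry block can be sealed immediately because it can never gain another predecessor; every other block is sealed only once all branches to it have been emitted.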