Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) that is used to represent the
basic block subcomponent of an Ebb that is a predecessor to an Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated any comments that refer to BasicBlock, as
eventually we will remove BlockPredecessor and replace with Block,
which is a basic block, so the comments will become correct.

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
their associated identifiers, will cause conflicts in a later commit,
so they are renamed to be more verbose here.

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs

* Manually update comment for `Block`

`Block`'s wikipedia article required an update.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters. This causes
the new, longer name to be truncated and fail a filecheck test. An
outdated comment was also fixed.
This commit is contained in:
Ryan Hunt
2020-02-07 10:46:47 -06:00
committed by GitHub
parent a136d1cb00
commit 832666c45e
370 changed files with 8090 additions and 7988 deletions

View File

@@ -4,29 +4,29 @@ target i686
; Test that compare+branch sequences are folded effectively on x86.
function %br_icmp(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
[DynRexOp1icscc#39,%rdx] v2 = icmp slt v0, v1
[Op1t8jccd_long#85] brnz v2, ebb1
[Op1jmpb#eb] jump ebb2
[Op1t8jccd_long#85] brnz v2, block1
[Op1jmpb#eb] jump block2
ebb2:
block2:
[Op1ret#c3] return v1
ebb1:
block1:
[Op1pu_id#b8,%rax] v8 = iconst.i32 3
[Op1ret#c3] return v8
}
; sameln: function %br_icmp
; nextln: ebb0(v0: i32, v1: i32):
; nextln: block0(v0: i32, v1: i32):
; nextln: v9 = ifcmp v0, v1
; nextln: v2 = trueif slt v9
; nextln: brif slt v9, ebb1
; nextln: jump ebb2
; nextln: brif slt v9, block1
; nextln: jump block2
; nextln:
; nextln: ebb2:
; nextln: block2:
; nextln: return v1
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: v8 = iconst.i32 3
; nextln: return v8
; nextln: }
@@ -34,29 +34,29 @@ ebb1:
; Use brz instead of brnz, so the condition is inverted.
function %br_icmp_inverse(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
[DynRexOp1icscc#39,%rdx] v2 = icmp slt v0, v1
[Op1t8jccd_long#84] brz v2, ebb1
[Op1jmpb#eb] jump ebb2
[Op1t8jccd_long#84] brz v2, block1
[Op1jmpb#eb] jump block2
ebb2:
block2:
[Op1ret#c3] return v1
ebb1:
block1:
[Op1pu_id#b8,%rax] v8 = iconst.i32 3
[Op1ret#c3] return v8
}
; sameln: function %br_icmp_inverse
; nextln: ebb0(v0: i32, v1: i32):
; nextln: block0(v0: i32, v1: i32):
; nextln: v9 = ifcmp v0, v1
; nextln: v2 = trueif slt v9
; nextln: brif sge v9, ebb1
; nextln: jump ebb2
; nextln: brif sge v9, block1
; nextln: jump block2
; nextln:
; nextln: ebb2:
; nextln: block2:
; nextln: return v1
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: v8 = iconst.i32 3
; nextln: return v8
; nextln: }
@@ -64,29 +64,29 @@ ebb1:
; Use icmp_imm instead of icmp.
function %br_icmp_imm(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
[DynRexOp1icscc_ib#7083] v2 = icmp_imm slt v0, 2
[Op1t8jccd_long#84] brz v2, ebb1
[Op1jmpb#eb] jump ebb2
[Op1t8jccd_long#84] brz v2, block1
[Op1jmpb#eb] jump block2
ebb2:
block2:
[Op1ret#c3] return v1
ebb1:
block1:
[Op1pu_id#b8,%rax] v8 = iconst.i32 3
[Op1ret#c3] return v8
}
; sameln: function %br_icmp_imm
; nextln: ebb0(v0: i32, v1: i32):
; nextln: block0(v0: i32, v1: i32):
; nextln: v9 = ifcmp_imm v0, 2
; nextln: v2 = trueif slt v9
; nextln: brif sge v9, ebb1
; nextln: jump ebb2
; nextln: brif sge v9, block1
; nextln: jump block2
; nextln:
; nextln: ebb2:
; nextln: block2:
; nextln: return v1
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: v8 = iconst.i32 3
; nextln: return v8
; nextln: }
@@ -94,30 +94,30 @@ ebb1:
; Use fcmp instead of icmp.
function %br_fcmp(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
block0(v0: f32, v1: f32):
[Op2fcscc#42e,%rdx] v2 = fcmp gt v0, v1
[Op1t8jccd_long#84] brz v2, ebb1
[Op1jmpb#eb] jump ebb2
[Op1t8jccd_long#84] brz v2, block1
[Op1jmpb#eb] jump block2
ebb2:
block2:
[Op1ret#c3] return v1
ebb1:
block1:
[Op1pu_id#b8,%rax] v18 = iconst.i32 0x40a8_0000
[Mp2frurm#56e,%xmm0] v8 = bitcast.f32 v18
[Op1ret#c3] return v8
}
; sameln: function %br_fcmp
; nextln: ebb0(v0: f32, v1: f32):
; nextln: block0(v0: f32, v1: f32):
; nextln: v19 = ffcmp v0, v1
; nextln: v2 = trueff gt v19
; nextln: brff ule v19, ebb1
; nextln: jump ebb2
; nextln: brff ule v19, block1
; nextln: jump block2
; nextln:
; nextln: ebb2:
; nextln: block2:
; nextln: return v1
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: v18 = iconst.i32 0x40a8_0000
; nextln: v8 = bitcast.f32 v18
; nextln: return v8

View File

@@ -2,7 +2,7 @@ test postopt
target x86_64
function %dual_loads(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
block0(v0: i64, v1: i64):
[DynRexOp1rr#8001] v3 = iadd v0, v1
v4 = load.i64 v3
v5 = uload8.i64 v3
@@ -15,7 +15,7 @@ ebb0(v0: i64, v1: i64):
}
; sameln: function %dual_loads
; nextln: ebb0(v0: i64, v1: i64):
; nextln: block0(v0: i64, v1: i64):
; nextln: v3 = iadd v0, v1
; nextln: v4 = load_complex.i64 v0+v1
; nextln: v5 = uload8_complex.i64 v0+v1
@@ -28,7 +28,7 @@ ebb0(v0: i64, v1: i64):
; nextln: }
function %dual_loads2(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
block0(v0: i64, v1: i64):
[DynRexOp1rr#8001] v3 = iadd v0, v1
v4 = load.i64 v3+1
v5 = uload8.i64 v3+1
@@ -41,7 +41,7 @@ ebb0(v0: i64, v1: i64):
}
; sameln: function %dual_loads2
; nextln: ebb0(v0: i64, v1: i64):
; nextln: block0(v0: i64, v1: i64):
; nextln: v3 = iadd v0, v1
; nextln: v4 = load_complex.i64 v0+v1+1
; nextln: v5 = uload8_complex.i64 v0+v1+1
@@ -54,7 +54,7 @@ ebb0(v0: i64, v1: i64):
; nextln: }
function %dual_stores(i64, i64, i64) {
ebb0(v0: i64, v1: i64, v2: i64):
block0(v0: i64, v1: i64, v2: i64):
[DynRexOp1rr#8001] v3 = iadd v0, v1
[RexOp1st#8089] store.i64 v2, v3
[RexOp1st#88] istore8.i64 v2, v3
@@ -64,7 +64,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
}
; sameln: function %dual_stores
; nextln: ebb0(v0: i64, v1: i64, v2: i64):
; nextln: block0(v0: i64, v1: i64, v2: i64):
; nextln: v3 = iadd v0, v1
; nextln: store_complex v2, v0+v1
; nextln: istore8_complex v2, v0+v1
@@ -74,7 +74,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
; nextln: }
function %dual_stores2(i64, i64, i64) {
ebb0(v0: i64, v1: i64, v2: i64):
block0(v0: i64, v1: i64, v2: i64):
[DynRexOp1rr#8001] v3 = iadd v0, v1
[RexOp1stDisp8#8089] store.i64 v2, v3+1
[RexOp1stDisp8#88] istore8.i64 v2, v3+1
@@ -84,7 +84,7 @@ ebb0(v0: i64, v1: i64, v2: i64):
}
; sameln: function %dual_stores2
; nextln: ebb0(v0: i64, v1: i64, v2: i64):
; nextln: block0(v0: i64, v1: i64, v2: i64):
; nextln: v3 = iadd v0, v1
; nextln: store_complex v2, v0+v1+1
; nextln: istore8_complex v2, v0+v1+1

View File

@@ -4,28 +4,28 @@ target x86_64
; Fold the immediate of an iadd_imm into an address offset.
function u0:0(i64 vmctx) -> i64 {
ebb0(v0: i64):
block0(v0: i64):
v1 = iadd_imm.i64 v0, 16
[RexOp1ldDisp8#808b] v2 = load.i64 notrap aligned v1
[Op1ret#c3] return v2
}
; sameln: function u0:0(i64 vmctx) -> i64 fast {
; nextln: ebb0(v0: i64):
; nextln: block0(v0: i64):
; nextln: v1 = iadd_imm v0, 16
; nextln: [RexOp1ldDisp8#808b] v2 = load.i64 notrap aligned v0+16
; nextln: [Op1ret#c3] return v2
; nextln: }
function u0:1(i64, i64 vmctx) {
ebb0(v3: i64, v0: i64):
block0(v3: i64, v0: i64):
v1 = iadd_imm.i64 v0, 16
[RexOp1stDisp8#8089] store.i64 notrap aligned v3, v1
[Op1ret#c3] return
}
; sameln: function u0:1(i64, i64 vmctx) fast {
; nextln: ebb0(v3: i64, v0: i64):
; nextln: block0(v3: i64, v0: i64):
; nextln: v1 = iadd_imm v0, 16
; nextln: [RexOp1stDisp8#8089] store notrap aligned v3, v0+16
; nextln: [Op1ret#c3] return