Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) that is used to represent the
basic block subcomponent of an Ebb that is a predecessor to an Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated any comments that refer to BasicBlock, as
eventually we will remove BlockPredecessor and replace it with Block,
which is a basic block, so the comments will become correct.

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
their associated identifiers, will cause conflicts in a later commit,
so they are renamed to be more verbose here.

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs

* Manually update comment for `Block`

`Block`'s wikipedia article required an update.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters. This caused the
new, longer name to be truncated and fail a filecheck test. An
outdated comment was also fixed.
This commit is contained in:
Ryan Hunt
2020-02-07 10:46:47 -06:00
committed by GitHub
parent a136d1cb00
commit 832666c45e
370 changed files with 8090 additions and 7988 deletions

View File

@@ -5,11 +5,11 @@ function %value_aliases(i32, f32, i64 vmctx) baldrdash_system_v {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: f32, v2: i64):
block0(v0: i32, v1: f32, v2: i64):
v3 = iconst.i32 0
jump ebb3(v3)
jump block3(v3)
ebb3(v4: i32):
block3(v4: i32):
v5 = heap_addr.i64 heap0, v4, 1
v6 = load.f32 v5
v7 -> v1
@@ -21,15 +21,15 @@ ebb3(v4: i32):
v12 -> v0
v13 = icmp ult v11, v12
v14 = bint.i32 v13
brnz v14, ebb3(v11)
jump ebb4
brnz v14, block3(v11)
jump block4
ebb4:
jump ebb2
block4:
jump block2
ebb2:
jump ebb1
block2:
jump block1
ebb1:
block1:
return
}

View File

@@ -6,7 +6,7 @@ target riscv32
; regex: RX=%x\d+
function %add(i32, i32) {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v3 = iadd v1, v2
; check: [R#0c,%x5]
; sameln: iadd
@@ -15,7 +15,7 @@ ebb0(v1: i32, v2: i32):
; Function with a dead argument.
function %dead_arg(i32, i32) -> i32{
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
; not: regmove
; check: return v1
return v1
@@ -23,7 +23,7 @@ ebb0(v1: i32, v2: i32):
; Return a value from a different register.
function %move1(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
; not: regmove
; check: regmove v2, %x11 -> %x10
; nextln: return v2
@@ -32,7 +32,7 @@ ebb0(v1: i32, v2: i32):
; Swap two registers.
function %swap(i32, i32) -> i32, i32 {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
; not: regmove
; check: regmove v2, %x11 -> $(tmp=$RX)
; nextln: regmove v1, %x10 -> %x11
@@ -41,40 +41,40 @@ ebb0(v1: i32, v2: i32):
return v2, v1
}
; Return an EBB argument.
function %retebb(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1)
jump ebb1(v2)
; Return a block argument.
function %retblock(i32, i32) -> i32 {
block0(v1: i32, v2: i32):
brnz v1, block1(v1)
jump block1(v2)
ebb1(v10: i32):
block1(v10: i32):
return v10
}
; Pass an EBB argument as a function argument.
function %callebb(i32, i32) -> i32 {
; Pass a block argument as a function argument.
function %callblock(i32, i32) -> i32 {
fn0 = %foo(i32) -> i32
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1)
jump ebb1(v2)
block0(v1: i32, v2: i32):
brnz v1, block1(v1)
jump block1(v2)
ebb1(v10: i32):
block1(v10: i32):
v11 = call fn0(v10)
return v11
}
; Pass an EBB argument as a jump argument.
function %jumpebb(i32, i32) -> i32 {
; Pass a block argument as a jump argument.
function %jumpblock(i32, i32) -> i32 {
fn0 = %foo(i32) -> i32
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1, v2)
jump ebb1(v2, v1)
block0(v1: i32, v2: i32):
brnz v1, block1(v1, v2)
jump block1(v2, v1)
ebb1(v10: i32, v11: i32):
jump ebb2(v10, v11)
block1(v10: i32, v11: i32):
jump block2(v10, v11)
ebb2(v20: i32, v21: i32):
block2(v20: i32, v21: i32):
return v21
}

View File

@@ -5,64 +5,64 @@ target riscv32
; regex: V=v\d+
; regex: WS=\s+
; regex: LOC=%\w+
; regex: EBB=ebb\d+
; regex: BB=block\d+
; This function is already CSSA, so no copies should be inserted.
function %cssa(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
; not: copy
; v0 is used by the branch and passed as an arg - that's no conflict.
brnz v0, ebb1(v0)
jump ebb2
brnz v0, block1(v0)
jump block2
ebb2:
block2:
; v0 is live across the branch above. That's no conflict.
v1 = iadd_imm v0, 7
jump ebb1(v1)
jump block1(v1)
ebb1(v10: i32):
block1(v10: i32):
v11 = iadd_imm v10, 7
return v11
}
function %trivial(i32) -> i32 {
ebb0(v0: i32):
; check: brnz v0, $(splitEdge=$EBB)
brnz v0, ebb1(v0)
jump ebb2
block0(v0: i32):
; check: brnz v0, $(splitEdge=$BB)
brnz v0, block1(v0)
jump block2
ebb2:
block2:
; not: copy
v1 = iadd_imm v0, 7
jump ebb1(v1)
jump block1(v1)
; check: $splitEdge:
; nextln: $(cp1=$V) = copy.i32 v0
; nextln: jump ebb1($cp1)
; nextln: jump block1($cp1)
ebb1(v10: i32):
; Use v0 in the destination EBB causes a conflict.
block1(v10: i32):
; Use v0 in the destination block causes a conflict.
v11 = iadd v10, v0
return v11
}
; A value is used as an SSA argument twice in the same branch.
function %dualuse(i32) -> i32 {
ebb0(v0: i32):
; check: brnz v0, $(splitEdge=$EBB)
brnz v0, ebb1(v0, v0)
jump ebb2
block0(v0: i32):
; check: brnz v0, $(splitEdge=$BB)
brnz v0, block1(v0, v0)
jump block2
ebb2:
block2:
v1 = iadd_imm v0, 7
v2 = iadd_imm v1, 56
jump ebb1(v1, v2)
jump block1(v1, v2)
; check: $splitEdge:
; check: $(cp1=$V) = copy.i32 v0
; nextln: jump ebb1($cp1, v0)
; nextln: jump block1($cp1, v0)
ebb1(v10: i32, v11: i32):
block1(v10: i32, v11: i32):
v12 = iadd v10, v11
return v12
}
@@ -70,26 +70,26 @@ ebb1(v10: i32, v11: i32):
; Interference away from the branch
; The interference can be broken with a copy at either branch.
function %interference(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
; not: copy
; check: brnz v0, $(splitEdge=$EBB)
; check: brnz v0, $(splitEdge=$BB)
; not: copy
brnz v0, ebb1(v0)
jump ebb2
brnz v0, block1(v0)
jump block2
ebb2:
block2:
v1 = iadd_imm v0, 7
; v1 and v0 interfere here:
v2 = iadd_imm v0, 8
; check: $(cp0=$V) = copy v1
; check: jump ebb1($cp0)
jump ebb1(v1)
; check: jump block1($cp0)
jump block1(v1)
; check: $splitEdge:
; not: copy
; nextln: jump ebb1(v0)
; nextln: jump block1(v0)
ebb1(v10: i32):
block1(v10: i32):
; not: copy
v11 = iadd_imm v10, 7
return v11
@@ -97,27 +97,27 @@ ebb1(v10: i32):
; A loop where one induction variable is used as a backedge argument.
function %fibonacci(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
jump ebb1(v1, v2)
jump block1(v1, v2)
; check: $(splitEdge=$EBB):
; check: $(splitEdge=$BB):
; check: $(nv11b=$V) = copy.i32 v11
; not: copy
; check: jump ebb1($nv11b, v12)
; check: jump block1($nv11b, v12)
ebb1(v10: i32, v11: i32):
block1(v10: i32, v11: i32):
; v11 needs to be isolated because it interferes with v10.
; check: ebb1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
; check: block1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
; check: v11 = copy $nv11a
v12 = iadd v10, v11
v13 = icmp ult v12, v0
; check: brnz v13, $splitEdge
brnz v13, ebb1(v11, v12)
jump ebb2
brnz v13, block1(v11, v12)
jump block2
ebb2:
block2:
return v12
}
@@ -128,30 +128,30 @@ ebb2:
function %stackarg(i32, i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss0 = incoming_arg 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32):
block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32):
; check: fill v8
; not: v8
jump ebb1(v8)
jump block1(v8)
ebb1(v10: i32):
block1(v10: i32):
v11 = iadd_imm v10, 1
return v11
}
function %gvn_unremovable_phi(i32) system_v {
ebb0(v0: i32):
block0(v0: i32):
v2 = iconst.i32 0
jump ebb2(v2, v0)
jump block2(v2, v0)
ebb2(v3: i32, v4: i32):
brnz v3, ebb2(v3, v4)
jump ebb3
block2(v3: i32, v4: i32):
brnz v3, block2(v3, v4)
jump block3
ebb3:
block3:
v5 = iconst.i32 1
brnz v3, ebb2(v2, v5)
jump ebb4
brnz v3, block2(v2, v5)
jump block4
ebb4:
block4:
return
}

File diff suppressed because it is too large Load Diff

View File

@@ -5,83 +5,83 @@ target x86_64 haswell
;
; The (old) coalescer creates a virtual register with two identical values.
function %pr216(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v {
ebb0(v0: i32, v1: i64):
block0(v0: i32, v1: i64):
v3 = iconst.i64 0
v5 = iconst.i32 0
brz v5, ebb3(v3)
jump ebb4(v3, v3)
brz v5, block3(v3)
jump block4(v3, v3)
ebb4(v11: i64, v29: i64):
block4(v11: i64, v29: i64):
v6 = iconst.i32 0
brz v6, ebb14
jump ebb15
brz v6, block14
jump block15
ebb15:
block15:
v9 = iconst.i32 -17
v12 = iconst.i32 0xffff_ffff_ffff_8000
jump ebb9(v12)
jump block9(v12)
ebb9(v10: i32):
brnz v10, ebb8(v9, v11, v11)
jump ebb16
block9(v10: i32):
brnz v10, block8(v9, v11, v11)
jump block16
ebb16:
brz.i32 v9, ebb13
jump ebb17
block16:
brz.i32 v9, block13
jump block17
ebb17:
block17:
v13 = iconst.i32 0
brnz v13, ebb6(v11, v11)
jump ebb18
brnz v13, block6(v11, v11)
jump block18
ebb18:
block18:
v14 = iconst.i32 0
brz v14, ebb12
jump ebb11
brz v14, block12
jump block11
ebb12:
jump ebb4(v11, v11)
block12:
jump block4(v11, v11)
ebb11:
jump ebb10(v11)
block11:
jump block10(v11)
ebb13:
block13:
v15 = iconst.i64 1
jump ebb10(v15)
jump block10(v15)
ebb10(v21: i64):
block10(v21: i64):
v16 = iconst.i32 0
brnz v16, ebb6(v21, v11)
jump ebb19
brnz v16, block6(v21, v11)
jump block19
ebb19:
block19:
v17 = iconst.i32 0xffff_ffff_ffff_9f35
jump ebb8(v17, v21, v11)
jump block8(v17, v21, v11)
ebb8(v8: i32, v23: i64, v28: i64):
jump ebb7(v8, v23, v28)
block8(v8: i32, v23: i64, v28: i64):
jump block7(v8, v23, v28)
ebb14:
block14:
v18 = iconst.i32 0
jump ebb7(v18, v11, v29)
jump block7(v18, v11, v29)
ebb7(v7: i32, v22: i64, v27: i64):
jump ebb6(v22, v27)
block7(v7: i32, v22: i64, v27: i64):
jump block6(v22, v27)
ebb6(v20: i64, v25: i64):
block6(v20: i64, v25: i64):
v19 = iconst.i32 0xffc7
brnz v19, ebb4(v20, v25)
jump ebb5
brnz v19, block4(v20, v25)
jump block5
ebb5:
jump ebb3(v25)
block5:
jump block3(v25)
ebb3(v24: i64):
jump ebb2(v24)
block3(v24: i64):
jump block2(v24)
ebb2(v4: i64):
jump ebb1(v4)
block2(v4: i64):
jump block1(v4)
ebb1(v2: i64):
block1(v2: i64):
return v2
}

View File

@@ -5,81 +5,81 @@ function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8])
gv0 = vmctx
heap0 = static gv0, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i64):
block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i64):
[RexOp1pu_id#b8] v5 = iconst.i32 0
[RexOp1pu_id#b8] v6 = iconst.i32 0
[RexOp1tjccb#74] brz v6, ebb10
[Op1jmpb#eb] jump ebb3(v5, v5, v5, v5, v5, v5, v0, v1, v2, v3)
[RexOp1tjccb#74] brz v6, block10
[Op1jmpb#eb] jump block3(v5, v5, v5, v5, v5, v5, v0, v1, v2, v3)
ebb3(v15: i32, v17: i32, v25: i32, v31: i32, v40: i32, v47: i32, v54: i32, v61: i32, v68: i32, v75: i32):
[Op1jmpb#eb] jump ebb6
block3(v15: i32, v17: i32, v25: i32, v31: i32, v40: i32, v47: i32, v54: i32, v61: i32, v68: i32, v75: i32):
[Op1jmpb#eb] jump block6
ebb6:
block6:
[RexOp1pu_id#b8] v8 = iconst.i32 0
[RexOp1tjccb#75] brnz v8, ebb5
[Op1jmpb#eb] jump ebb20
[RexOp1tjccb#75] brnz v8, block5
[Op1jmpb#eb] jump block20
ebb20:
block20:
[RexOp1pu_id#b8] v9 = iconst.i32 0
[RexOp1pu_id#b8] v11 = iconst.i32 0
[DynRexOp1icscc#39] v12 = icmp.i32 eq v15, v11
[RexOp2urm_noflags#4b6] v13 = bint.i32 v12
[DynRexOp1rr#21] v14 = band v9, v13
[RexOp1tjccb#75] brnz v14, ebb6
[Op1jmpb#eb] jump ebb7
[RexOp1tjccb#75] brnz v14, block6
[Op1jmpb#eb] jump block7
ebb7:
[RexOp1tjccb#74] brz.i32 v17, ebb8
[Op1jmpb#eb] jump ebb17
block7:
[RexOp1tjccb#74] brz.i32 v17, block8
[Op1jmpb#eb] jump block17
ebb17:
block17:
[RexOp1pu_id#b8] v18 = iconst.i32 0
[RexOp1tjccb#74] brz v18, ebb9
[Op1jmpb#eb] jump ebb16
[RexOp1tjccb#74] brz v18, block9
[Op1jmpb#eb] jump block16
ebb16:
block16:
[RexOp1pu_id#b8] v21 = iconst.i32 0
[RexOp1umr#89] v79 = uextend.i64 v5
[DynRexOp1r_ib#8083] v80 = iadd_imm.i64 v4, 0
[RexOp1ld#808b] v81 = load.i64 v80
[DynRexOp1rr#8001] v22 = iadd v81, v79
[RexMp1st#189] istore16 v21, v22
[Op1jmpb#eb] jump ebb9
[Op1jmpb#eb] jump block9
ebb9:
[Op1jmpb#eb] jump ebb8
block9:
[Op1jmpb#eb] jump block8
ebb8:
block8:
[RexOp1pu_id#b8] v27 = iconst.i32 3
[RexOp1pu_id#b8] v28 = iconst.i32 4
[DynRexOp1rr#09] v35 = bor.i32 v31, v13
[RexOp1tjccb#75] brnz v35, ebb15(v27)
[Op1jmpb#eb] jump ebb15(v28)
[RexOp1tjccb#75] brnz v35, block15(v27)
[Op1jmpb#eb] jump block15(v28)
ebb15(v36: i32):
[Op1jmpb#eb] jump ebb3(v25, v36, v25, v31, v40, v47, v54, v61, v68, v75)
block15(v36: i32):
[Op1jmpb#eb] jump block3(v25, v36, v25, v31, v40, v47, v54, v61, v68, v75)
ebb5:
[Op1jmpb#eb] jump ebb4
block5:
[Op1jmpb#eb] jump block4
ebb4:
[Op1jmpb#eb] jump ebb2(v40, v47, v54, v61, v68, v75)
block4:
[Op1jmpb#eb] jump block2(v40, v47, v54, v61, v68, v75)
ebb10:
block10:
[RexOp1pu_id#b8] v43 = iconst.i32 0
[Op1jmpb#eb] jump ebb2(v43, v5, v0, v1, v2, v3)
[Op1jmpb#eb] jump block2(v43, v5, v0, v1, v2, v3)
ebb2(v7: i32, v45: i32, v52: i32, v59: i32, v66: i32, v73: i32):
block2(v7: i32, v45: i32, v52: i32, v59: i32, v66: i32, v73: i32):
[RexOp1pu_id#b8] v44 = iconst.i32 0
[RexOp1tjccb#74] brz v44, ebb12
[Op1jmpb#eb] jump ebb18
[RexOp1tjccb#74] brz v44, block12
[Op1jmpb#eb] jump block18
ebb18:
block18:
[RexOp1pu_id#b8] v50 = iconst.i32 11
[RexOp1tjccb#74] brz v50, ebb14
[Op1jmpb#eb] jump ebb19
[RexOp1tjccb#74] brz v50, block14
[Op1jmpb#eb] jump block19
ebb19:
block19:
[RexOp1umr#89] v82 = uextend.i64 v52
[DynRexOp1r_ib#8083] v83 = iadd_imm.i64 v4, 0
[RexOp1ld#808b] v84 = load.i64 v83
@@ -91,25 +91,25 @@ function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8])
[DynRexOp1rr#8001] v64 = iadd v87, v85
[RexOp1st#88] istore8 v59, v64
[RexOp1pu_id#b8] v65 = iconst.i32 0
[Op1jmpb#eb] jump ebb13(v65)
[Op1jmpb#eb] jump block13(v65)
ebb14:
[Op1jmpb#eb] jump ebb13(v66)
block14:
[Op1jmpb#eb] jump block13(v66)
ebb13(v51: i32):
block13(v51: i32):
[RexOp1umr#89] v88 = uextend.i64 v45
[DynRexOp1r_ib#8083] v89 = iadd_imm.i64 v4, 0
[RexOp1ld#808b] v90 = load.i64 v89
[DynRexOp1rr#8001] v71 = iadd v90, v88
[RexOp1st#89] store v51, v71
[Op1jmpb#eb] jump ebb12
[Op1jmpb#eb] jump block12
ebb12:
[Op1jmpb#eb] jump ebb11
block12:
[Op1jmpb#eb] jump block11
ebb11:
[Op1jmpb#eb] jump ebb1
block11:
[Op1jmpb#eb] jump block1
ebb1:
block1:
[Op1ret#c3] return
}

View File

@@ -6,7 +6,7 @@ target i686
; Tied operands, both are killed at instruction.
function %tied_easy() -> i32 {
ebb0:
block0:
v0 = iconst.i32 12
v1 = iconst.i32 13
; not: copy
@@ -17,7 +17,7 @@ ebb0:
; Tied operand is live after instruction.
function %tied_alive() -> i32 {
ebb0:
block0:
v0 = iconst.i32 12
v1 = iconst.i32 13
; check: $(v0c=$V) = copy v0
@@ -30,7 +30,7 @@ ebb0:
; Fixed register constraint.
function %fixed_op() -> i32 {
ebb0:
block0:
; check: ,%rax]
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
@@ -43,7 +43,7 @@ ebb0:
; Fixed register constraint twice.
function %fixed_op_twice() -> i32 {
ebb0:
block0:
; check: ,%rax]
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
@@ -60,7 +60,7 @@ ebb0:
; Tied use of a diverted register.
function %fixed_op_twice() -> i32 {
ebb0:
block0:
; check: ,%rax]
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12

View File

@@ -6,7 +6,7 @@ target x86_64
function %foo() -> f64 {
fn0 = %bar()
ebb0:
block0:
v0 = f64const 0.0
call fn0()
fallthrough_return v0
@@ -16,7 +16,7 @@ ebb0:
function %foo() -> f64 {
fn0 = %bar() -> f64, f64
ebb0:
block0:
v0, v1 = call fn0()
fallthrough_return v1
}

View File

@@ -1,45 +1,45 @@
test regalloc
target x86_64 haswell
; This test case would create an EBB parameter that was a ghost value.
; This test case would create a block parameter that was a ghost value.
; The coalescer would insert a copy of the ghost value, leading to verifier errors.
;
; We don't allow EBB parameters to be ghost values any longer.
; We don't allow block parameters to be ghost values any longer.
;
; Test case by binaryen fuzzer!
function %pr215(i64 vmctx [%rdi]) system_v {
ebb0(v0: i64):
block0(v0: i64):
v10 = iconst.i64 0
v1 = bitcast.f64 v10
jump ebb5(v1)
jump block5(v1)
ebb5(v9: f64):
block5(v9: f64):
v11 = iconst.i64 0xffff_ffff_ff9a_421a
v4 = bitcast.f64 v11
v6 = iconst.i32 0
v7 = iconst.i32 1
brnz v7, ebb4(v6)
jump ebb8
brnz v7, block4(v6)
jump block8
ebb8:
block8:
v8 = iconst.i32 0
jump ebb7(v8)
jump block7(v8)
ebb7(v5: i32):
brnz v5, ebb3(v4)
jump ebb5(v4)
block7(v5: i32):
brnz v5, block3(v4)
jump block5(v4)
ebb4(v3: i32):
brnz v3, ebb2
jump ebb3(v9)
block4(v3: i32):
brnz v3, block2
jump block3(v9)
ebb3(v2: f64):
jump ebb2
block3(v2: f64):
jump block2
ebb2:
jump ebb1
block2:
jump block1
ebb1:
block1:
return
}

View File

@@ -7,19 +7,19 @@ target i686
; The icmp_imm instrutions write their b1 result to the ABCD register class on
; 32-bit x86. So if we define 5 live values, they can't all fit.
function %global_constraints(i32) {
ebb0(v0: i32):
block0(v0: i32):
v1 = icmp_imm eq v0, 1
v2 = icmp_imm ugt v0, 2
v3 = icmp_imm sle v0, 3
v4 = icmp_imm ne v0, 4
v5 = icmp_imm sge v0, 5
brnz v5, ebb1
jump ebb2
brnz v5, block1
jump block2
ebb2:
block2:
return
ebb1:
block1:
; Make sure v1-v5 are live in.
v10 = band v1, v2
v11 = bor v3, v4

View File

@@ -2,15 +2,15 @@ test regalloc
target x86_64 haswell
function %foo() system_v {
ebb4:
block4:
v3 = iconst.i32 0
jump ebb3
jump block3
ebb3:
block3:
v9 = udiv v3, v3
jump ebb1
jump block1
ebb1:
block1:
v19 = iadd.i32 v9, v9
jump ebb3
jump block3
}

View File

@@ -2,43 +2,43 @@ test regalloc
target x86_64
function u0:587() fast {
ebb0:
block0:
v97 = iconst.i32 0
v169 = iconst.i32 0
v1729 = iconst.i32 0
jump ebb100(v97, v97, v97, v97, v97)
jump block100(v97, v97, v97, v97, v97)
ebb100(v1758: i32, v1784: i32, v1845: i32, v1856: i32, v1870: i32):
block100(v1758: i32, v1784: i32, v1845: i32, v1856: i32, v1870: i32):
v1762 = iconst.i32 0
v1769 = iconst.i32 0
v1774 = iconst.i32 0
v1864 = iconst.i32 0
v1897 = iconst.i32 0
jump ebb102(v1774, v1784, v1845, v1856, v1870, v1758, v1762, v169, v1729, v97, v169, v169, v169, v169)
jump block102(v1774, v1784, v1845, v1856, v1870, v1758, v1762, v169, v1729, v97, v169, v169, v169, v169)
ebb102(v1785: i32, v1789: i32, v1843: i32, v1854: i32, v1868: i32, v1882: i32, v1890: i32, v1901: i32, v1921: i32, v1933: i32, v2058: i32, v2124: i32, v2236: i32, v2366: i32):
block102(v1785: i32, v1789: i32, v1843: i32, v1854: i32, v1868: i32, v1882: i32, v1890: i32, v1901: i32, v1921: i32, v1933: i32, v2058: i32, v2124: i32, v2236: i32, v2366: i32):
v1929 = iconst.i32 0
v1943 = iconst.i32 0
v1949 = iconst.i32 0
jump ebb123(v1897, v1769)
jump block123(v1897, v1769)
ebb123(v1950: i32, v1979: i32):
block123(v1950: i32, v1979: i32):
v1955 = iconst.i32 0
brz v1955, ebb125
jump ebb122(v1929, v1843, v1864, v2058, v1882, v1897, v1943, v1868, v2124, v1901)
brz v1955, block125
jump block122(v1929, v1843, v1864, v2058, v1882, v1897, v1943, v1868, v2124, v1901)
ebb125:
block125:
v1961 = iadd_imm.i32 v1949, 0
v1952 = iconst.i32 0
v1962 = iconst.i64 0
v1963 = load.i32 v1962
brz v1963, ebb123(v1952, v1961)
jump ebb127
brz v1963, block123(v1952, v1961)
jump block127
ebb127:
block127:
v1966 = iconst.i32 0
jump ebb122(v1963, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966)
jump block122(v1963, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966, v1966)
ebb122(v1967: i32, v1971: i32, v1972: i32, v1978: i32, v2032: i32, v2041: i32, v2053: i32, v2076: i32, v2085: i32, v2096: i32):
block122(v1967: i32, v1971: i32, v1972: i32, v1978: i32, v2032: i32, v2041: i32, v2053: i32, v2076: i32, v2085: i32, v2096: i32):
trap user0
}

View File

@@ -7,31 +7,31 @@ target riscv32
; resolve that conflict since v1 will just interfere with the inserted copy too.
;function %c1(i32) -> i32 {
;ebb0(v0: i32):
;block0(v0: i32):
; v1 = iadd_imm v0, 1
; v2 = iconst.i32 1
; brz v1, ebb1(v2)
; jump ebb2
; brz v1, block1(v2)
; jump block2
;
;ebb1(v3: i32):
;block1(v3: i32):
; return v3
;
;ebb2:
; jump ebb1(v1)
;block2:
; jump block1(v1)
;}
; Same thing with v1 and v2 swapped to reverse the order of definitions.
function %c2(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
v1 = iadd_imm v0, 1
v2 = iconst.i32 1
brz v2, ebb1(v1)
jump ebb2
brz v2, block1(v1)
jump block2
ebb1(v3: i32):
block1(v3: i32):
return v3
ebb2:
jump ebb1(v2)
block2:
jump block1(v2)
}

View File

@@ -2,7 +2,7 @@ test regalloc
target x86_64 haswell
function u0:9(i64 [%rdi], f32 [%xmm0], f64 [%xmm1], i32 [%rsi], i32 [%rdx], i64 vmctx [%r14]) -> i64 [%rax] baldrdash_system_v {
ebb0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64):
block0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64):
v32 = iconst.i32 0
v6 = bitcast.f32 v32
v7 = iconst.i64 0
@@ -19,23 +19,23 @@ ebb0(v0: i64, v1: f32, v2: f64, v3: i32, v4: i32, v5: i64):
v14 = bitcast.f64 v36
v44 = iconst.i64 0
v37 = icmp slt v0, v44
brnz v37, ebb2
jump ebb11
brnz v37, block2
jump block11
ebb11:
block11:
v38 = fcvt_from_sint.f64 v0
jump ebb3(v38)
jump block3(v38)
ebb2:
block2:
v45 = iconst.i32 1
v39 = ushr.i64 v0, v45
v40 = band_imm.i64 v0, 1
v41 = bor v39, v40
v42 = fcvt_from_sint.f64 v41
v43 = fadd v42, v42
jump ebb3(v43)
jump block3(v43)
ebb3(v15: f64):
block3(v15: f64):
v16 = fpromote.f64 v9
v46 = uextend.i64 v10
v17 = fcvt_from_sint.f64 v46
@@ -43,42 +43,42 @@ ebb3(v15: f64):
v19 = fpromote.f64 v12
v54 = iconst.i64 0
v47 = icmp.i64 slt v13, v54
brnz v47, ebb4
jump ebb12
brnz v47, block4
jump block12
ebb12:
block12:
v48 = fcvt_from_sint.f64 v13
jump ebb5(v48)
jump block5(v48)
ebb4:
block4:
v55 = iconst.i32 1
v49 = ushr.i64 v13, v55
v50 = band_imm.i64 v13, 1
v51 = bor v49, v50
v52 = fcvt_from_sint.f64 v51
v53 = fadd v52, v52
jump ebb5(v53)
jump block5(v53)
ebb5(v20: f64):
block5(v20: f64):
v63 = iconst.i64 0
v56 = icmp.i64 slt v7, v63
brnz v56, ebb6
jump ebb13
brnz v56, block6
jump block13
ebb13:
block13:
v57 = fcvt_from_sint.f64 v7
jump ebb7(v57)
jump block7(v57)
ebb6:
block6:
v64 = iconst.i32 1
v58 = ushr.i64 v7, v64
v59 = band_imm.i64 v7, 1
v60 = bor v58, v59
v61 = fcvt_from_sint.f64 v60
v62 = fadd v61, v61
jump ebb7(v62)
jump block7(v62)
ebb7(v21: f64):
block7(v21: f64):
v22 = fadd v21, v14
v23 = fadd.f64 v20, v22
v24 = fadd.f64 v19, v23
@@ -90,34 +90,34 @@ ebb7(v21: f64):
v30 = x86_cvtt2si.i64 v29
v69 = iconst.i64 0x8000_0000_0000_0000
v65 = icmp ne v30, v69
brnz v65, ebb8
jump ebb15
brnz v65, block8
jump block15
ebb15:
block15:
v66 = fcmp uno v29, v29
brz v66, ebb9
jump ebb16
brz v66, block9
jump block16
ebb16:
block16:
trap bad_toint
ebb9:
block9:
v70 = iconst.i64 0xc3e0_0000_0000_0000
v67 = bitcast.f64 v70
v68 = fcmp gt v67, v29
brz v68, ebb10
jump ebb17
brz v68, block10
jump block17
ebb17:
block17:
trap int_ovf
ebb10:
jump ebb8
block10:
jump block8
ebb8:
jump ebb1(v30)
block8:
jump block1(v30)
ebb1(v31: i64):
block1(v31: i64):
return v31
}
@@ -126,7 +126,7 @@ function u0:26(i64 vmctx [%r14]) -> i64 [%rax] baldrdash_system_v {
gv0 = iadd_imm.i64 gv1, 48
sig0 = (i32 [%rdi], i64 [%rsi], i64 vmctx [%r14], i64 sigid [%rbx]) -> i64 [%rax] baldrdash_system_v
ebb0(v0: i64):
block0(v0: i64):
v1 = iconst.i32 32
v2 = iconst.i64 64
v3 = iconst.i32 9
@@ -135,30 +135,30 @@ ebb0(v0: i64):
v6 = load.i32 v5
v7 = icmp uge v3, v6
; If we're unlucky, there are no ABCD registers available for v7 at this branch.
brz v7, ebb2
jump ebb4
brz v7, block2
jump block4
ebb4:
block4:
trap oob
ebb2:
block2:
v8 = load.i64 v5+8
v9 = uextend.i64 v3
v16 = iconst.i64 16
v10 = imul v9, v16
v11 = iadd v8, v10
v12 = load.i64 v11
brnz v12, ebb3
jump ebb5
brnz v12, block3
jump block5
ebb5:
block5:
trap icall_null
ebb3:
block3:
v13 = load.i64 v11+8
v14 = call_indirect.i64 sig0, v12(v1, v2, v13, v4)
jump ebb1(v14)
jump block1(v14)
ebb1(v15: i64):
block1(v15: i64):
return v15
}

View File

@@ -14,16 +14,16 @@ target x86_64 haswell
;
; - The same value used for a tied operand and a fixed operand.
; - The common value is already in %rcx.
; - The tied output value is live outside the EBB.
; - The tied output value is live outside the block.
;
; Under these conditions, Solver::add_tied_input() would create a variable for the tied input
; without considering the fixed constraint.
function %pr221(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] {
ebb0(v0: i64, v1: i64, v2: i64, v3: i64):
block0(v0: i64, v1: i64, v2: i64, v3: i64):
v4 = ushr v3, v3
jump ebb1
jump block1
ebb1:
block1:
return v4
}
@@ -37,13 +37,13 @@ ebb1:
; Since the ushr x, x result is forced to be placed in %rcx, we must set the replace_global_defines
; flag so it can be reassigned to a different global register.
function %pr218(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] {
ebb0(v0: i64, v1: i64, v2: i64, v3: i64):
block0(v0: i64, v1: i64, v2: i64, v3: i64):
; check: regmove v3, %rcx ->
v4 = ushr v0, v0
; check: v4 = copy
jump ebb1
jump block1
ebb1:
block1:
; v3 is globally live in %rcx.
; v4 is also globally live. Needs to be assigned something else for the trip across the CFG edge.
v5 = iadd v3, v4

View File

@@ -4,7 +4,7 @@ target x86_64
; Return the same value twice. This needs a copy so that each value can be
; allocated its own register.
function %multiple_returns() -> i64, i64 {
ebb0:
block0:
v2 = iconst.i64 0
return v2, v2
}
@@ -14,7 +14,7 @@ ebb0:
; Same thing, now with a fallthrough_return.
function %multiple_returns() -> i64, i64 {
ebb0:
block0:
v2 = iconst.i64 0
fallthrough_return v2, v2
}

View File

@@ -2,7 +2,7 @@ test regalloc
target x86_64 haswell
function %test(i64) -> i64 system_v {
ebb0(v0: i64):
block0(v0: i64):
v2 = iconst.i64 12
; This division clobbers two of its fixed input registers on x86.
; These are FixedTied constraints that the spiller needs to resolve.

View File

@@ -2,14 +2,14 @@ test regalloc
target x86_64 haswell
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
; Filed as https://github.com/bytecodealliance/cranelift/issues/208
;
; The verifier complains about a branch argument that is not in the same virtual register as the
; corresponding EBB argument.
; corresponding block argument.
;
; The problem was the reload pass rewriting EBB arguments on "brnz v9, ebb3(v9)"
; The problem was the reload pass rewriting block arguments on "brnz v9, block3(v9)"
function %pr208(i64 vmctx [%rdi]) system_v {
gv1 = vmctx
@@ -20,18 +20,18 @@ function %pr208(i64 vmctx [%rdi]) system_v {
fn0 = u0:1 sig0
fn1 = u0:3 sig1
ebb0(v0: i64):
block0(v0: i64):
v1 = iconst.i32 0
v2 = call fn0(v0)
v20 = iconst.i32 0x4ffe
v16 = icmp uge v2, v20
brz v16, ebb5
jump ebb9
brz v16, block5
jump block9
ebb9:
block9:
trap heap_oob
ebb5:
block5:
v17 = uextend.i64 v2
v18 = iadd_imm.i64 v0, -8
v19 = load.i64 v18
@@ -40,25 +40,25 @@ ebb5:
v21 = iconst.i32 0
v5 = icmp eq v4, v21
v6 = bint.i32 v5
brnz v6, ebb2
jump ebb3(v4)
brnz v6, block2
jump block3(v4)
; check: ebb5:
; check: jump ebb3(v4)
; check: $(splitEdge=$EBB):
; nextln: jump ebb3(v9)
; check: block5:
; check: jump block3(v4)
; check: $(splitEdge=$BB):
; nextln: jump block3(v9)
ebb3(v7: i32):
block3(v7: i32):
call fn1(v0, v7)
v26 = iconst.i32 0x4ffe
v22 = icmp uge v7, v26
brz v22, ebb6
jump ebb10
brz v22, block6
jump block10
ebb10:
block10:
trap heap_oob
ebb6:
block6:
v23 = uextend.i64 v7
v24 = iadd_imm.i64 v0, -8
v25 = load.i64 v24
@@ -66,23 +66,23 @@ ebb6:
v9 = load.i32 v8+56
; check: v9 = spill
; check: brnz $V, $splitEdge
brnz v9, ebb3(v9)
jump ebb4
brnz v9, block3(v9)
jump block4
ebb4:
jump ebb2
block4:
jump block2
ebb2:
block2:
v10 = iconst.i32 0
v31 = iconst.i32 0x4ffe
v27 = icmp uge v10, v31
brz v27, ebb7
jump ebb11
brz v27, block7
jump block11
ebb11:
block11:
trap heap_oob
ebb7:
block7:
v28 = uextend.i64 v10
v29 = iadd_imm.i64 v0, -8
v30 = load.i64 v29
@@ -92,21 +92,21 @@ ebb7:
v13 = iconst.i32 0
v36 = iconst.i32 0x4ffe
v32 = icmp uge v13, v36
brz v32, ebb8
jump ebb12
brz v32, block8
jump block12
ebb12:
block12:
trap heap_oob
ebb8:
block8:
v33 = uextend.i64 v13
v34 = iadd_imm.i64 v0, -8
v35 = load.i64 v34
v14 = iadd v35, v33
v15 = load.i32 v14+12
call fn1(v0, v15)
jump ebb1
jump block1
ebb1:
block1:
return
}

View File

@@ -12,12 +12,12 @@ function u0:0(i64, i64, i64) system_v {
fn1 = u0:94 sig0
fn2 = u0:95 sig1
ebb0(v0: i64, v1: i64, v2: i64):
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i16 0
jump ebb1(v3)
jump block1(v3)
ebb1(v4: i16):
block1(v4: i16):
call fn1()
call fn2(v4)
jump ebb1(v4)
jump block1(v4)
}

View File

@@ -7,7 +7,7 @@ target riscv32 enable_e
function %spill_return() -> i32 {
fn0 = %foo() -> i32 system_v
ebb0:
block0:
v0 = call fn0()
; check: $(reg=$V) = call fn0
; check: v0 = spill $reg
@@ -24,7 +24,7 @@ ebb0:
; on the stack.
function %spilled_copy_arg(i32, i32, i32, i32, i32, i32, i32) -> i32 {
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
; not: copy
; check: v10 = fill v6
v10 = copy v6
@@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
function %spilled_copy_result(i32) -> i32 {
fn0 = %foo(i32)
ebb0(v0: i32):
block0(v0: i32):
; not: copy
; check: v1 = spill v0
v1 = copy v0

View File

@@ -2,7 +2,7 @@ test regalloc
target i686 haswell
function %pr165() system_v {
ebb0:
block0:
v0 = iconst.i32 0x0102_0304
v1 = iconst.i32 0x1102_0304
v2 = iconst.i32 0x2102_0304
@@ -20,7 +20,7 @@ ebb0:
; Same as above, but use so many registers that spilling is required.
; Note: This is also a candidate for using xchg instructions.
function %emergency_spill() system_v {
ebb0:
block0:
v0 = iconst.i32 0x0102_0304
v1 = iconst.i32 0x1102_0304
v2 = iconst.i32 0x2102_0304

View File

@@ -4,16 +4,16 @@ set enable_pinned_reg=true
target x86_64 haswell
function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v {
ebb0(v0: i32, v1: i32, v2: i32, v3: i64):
block0(v0: i32, v1: i32, v2: i32, v3: i64):
v236 = iconst.i32 0x4de9_bd37
v424 = iconst.i32 0
jump ebb37(v424)
jump block37(v424)
ebb37(v65: i32):
block37(v65: i32):
v433 = iconst.i32 0
jump ebb40(v433)
jump block40(v433)
ebb40(v70: i32):
block40(v70: i32):
v75 = iconst.i32 0
v259 = iconst.i32 0
v78 -> v259
@@ -28,17 +28,17 @@ ebb40(v70: i32):
v272 = iconst.i32 0x4de9_bd37
v490, v273 = x86_smulx v100, v272
v493 = iconst.i32 0
jump ebb61(v493)
jump block61(v493)
ebb61(v103: i32):
block61(v103: i32):
v104 = iconst.i32 -23
v105 = iconst.i32 -23
v106 = popcnt v105
v500 = sshr_imm v104, 31
v501 = iconst.i32 0
jump ebb64(v501)
jump block64(v501)
ebb64(v107: i32):
block64(v107: i32):
v108 = iconst.i32 0
v109 = iconst.i32 0
v278 = iconst.i32 0
@@ -49,9 +49,9 @@ ebb64(v107: i32):
v283 = iadd v281, v282
v111 -> v283
v112 = rotr v108, v283
jump ebb65
jump block65
ebb65:
block65:
v509 = iconst.i32 0
v510, v511 = x86_sdivmodx v107, v509, v112
v113 -> v510

View File

@@ -4,16 +4,16 @@ set enable_pinned_reg=true
target x86_64 haswell
function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v {
ebb0(v0: i32, v1: i32, v2: i32, v3: i64):
block0(v0: i32, v1: i32, v2: i32, v3: i64):
v5 = iconst.i32 -8
v114 = iconst.i32 0
v16 = iconst.i32 -8
v17 = popcnt v16
v192 = ifcmp_imm v17, -1
trapif ne v192, user0
jump ebb12
jump block12
ebb12:
block12:
v122 = iconst.i32 0
v123 = ushr_imm v122, 31
v124 = iadd v122, v123
@@ -23,51 +23,51 @@ ebb12:
v31 -> v204
v210 = ifcmp_imm v31, -1
trapif ne v210, user0
jump ebb18
jump block18
ebb18:
block18:
v215 = iconst.i32 0
jump ebb19(v215)
jump block19(v215)
ebb19(v32: i32):
block19(v32: i32):
v35 = iconst.i32 0
v218 = ifcmp_imm v35, -1
trapif ne v218, user0
jump ebb21
jump block21
ebb21:
block21:
v223 = iconst.i32 0
jump ebb22(v223)
jump block22(v223)
ebb22(v36: i32):
block22(v36: i32):
v136 = iconst.i32 0
v40 -> v136
v227 = ifcmp_imm v136, -1
trapif ne v227, user0
jump ebb24
jump block24
ebb24:
block24:
v232 = iconst.i32 0
jump ebb25(v232)
jump block25(v232)
ebb25(v41: i32):
block25(v41: i32):
v142 = iconst.i32 0
v45 -> v142
v236 = ifcmp_imm v142, -1
trapif ne v236, user0
jump ebb27
jump block27
ebb27:
block27:
v241 = iconst.i32 0
jump ebb28(v241)
jump block28(v241)
ebb28(v46: i32):
block28(v46: i32):
v49 = iconst.i32 0
v244 = ifcmp_imm v49, -1
trapif ne v244, user0
jump ebb30
jump block30
ebb30:
block30:
v254 = iconst.i32 0
v53 -> v254
v54 = iconst.i32 -23
@@ -80,9 +80,9 @@ ebb30:
v148 = iadd v146, v147
v57 -> v148
v58 = ishl v53, v148
jump ebb35
jump block35
ebb35:
block35:
v262 = iconst.i32 0
v263, v264 = x86_sdivmodx v46, v262, v58
v59 -> v263
@@ -93,9 +93,9 @@ ebb35:
v280 = iconst.i32 0
v281 = ffcmp v61, v61
trapff ord v281, user0
jump ebb41(v280)
jump block41(v280)
ebb41(v62: i32):
block41(v62: i32):
v157 = iconst.i32 0
v158 = sshr_imm v157, 4
v159 = iconst.i32 0
@@ -103,9 +103,9 @@ ebb41(v62: i32):
v75 -> v160
v308 = ifcmp_imm v160, -1
trapif ne v308, user0
jump ebb52
jump block52
ebb52:
block52:
v87 = iconst.i32 -23
v88 = iconst.i32 -23
v89 = popcnt v88

View File

@@ -6,7 +6,7 @@ target x86_64 haswell
;; Test for the issue #1123; https://github.com/bytecodealliance/cranelift/issues/1123
function u0:0(i32, i32, i32, i64 vmctx) -> i64 uext system_v {
ebb0(v0: i32, v1: i32, v2: i32, v3: i64):
block0(v0: i32, v1: i32, v2: i32, v3: i64):
v351 = iconst.i32 0x4de9_bd37
v31 = iconst.i32 -23
v35 = iconst.i32 0
@@ -24,68 +24,68 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i64):
v53 = iconst.i32 0
v547 = ifcmp_imm v53, -1
trapif ne v547, user0
jump ebb30
jump block30
ebb30:
block30:
v75 = iconst.i32 0
v581 = ifcmp_imm v75, -1
trapif ne v581, user0
jump ebb42
jump block42
ebb42:
block42:
v136 = iconst.i32 0
v691 = ifcmp_imm v136, -1
trapif ne v691, user0
jump ebb81
jump block81
ebb81:
block81:
v158 = iconst.i32 0
v725 = ifcmp_imm v158, -1
trapif ne v725, user0
jump ebb93
jump block93
ebb93:
block93:
v760 = iconst.i32 0
jump ebb106(v760)
jump block106(v760)
ebb106(v175: i32):
block106(v175: i32):
v179 = iconst.i32 0
v180 = icmp_imm eq v179, 0
v183 = iconst.i32 0
v766 = ifcmp_imm v183, -1
trapif ne v766, user0
jump ebb108
jump block108
ebb108:
block108:
v771 = iconst.i32 0
jump ebb109(v771)
jump block109(v771)
ebb109(v184: i32):
block109(v184: i32):
v785 = iconst.i32 0
v193 -> v785
v791 = ifcmp_imm v193, -1
trapif ne v791, user0
jump ebb117
jump block117
ebb117:
block117:
v796 = iconst.i32 0
jump ebb118(v796)
jump block118(v796)
ebb118(v194: i32):
block118(v194: i32):
v203 = iconst.i32 -63
v809 = iconst.i32 0
v207 -> v809
v815 = ifcmp_imm v207, -1
trapif ne v815, user0
jump ebb126
jump block126
ebb126:
block126:
v209 = iconst.i32 0
v823 = ifcmp_imm v209, -1
trapif ne v823, user0
jump ebb129
jump block129
ebb129:
block129:
v213 = iconst.i32 -23
v214 = iconst.i32 -19
v215 = icmp_imm eq v214, 0
@@ -111,9 +111,9 @@ ebb129:
v858, v859 = x86_sdivmodx v175, v857, v232
v233 -> v858
v915 = iconst.i32 0
jump ebb163(v915)
jump block163(v915)
ebb163(v253: i32):
block163(v253: i32):
v255 = iconst.i32 0
v256 = iconst.i32 -23
v257 = iconst.i32 -19

View File

@@ -7,20 +7,20 @@ target x86_64
; 'Ran out of GPR registers when inserting copy before v68 = icmp.i32 eq v66, v67',
; cranelift-codegen/src/regalloc/spilling.rs:425:28 message.
;
; The process_reg_uses() function is trying to insert a copy before the icmp instruction in ebb4
; and runs out of registers to spill. Note that ebb7 has a lot of dead parameter values.
; The process_reg_uses() function is trying to insert a copy before the icmp instruction in block4
; and runs out of registers to spill. Note that block7 has a lot of dead parameter values.
;
; The spiller was not releasing register pressure for dead EBB parameters.
; The spiller was not releasing register pressure for dead block parameters.
function %pr223(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v {
ebb0(v0: i32, v1: i64):
block0(v0: i32, v1: i64):
v2 = iconst.i32 0
v3 = iconst.i64 0
v4 = iconst.i32 0xffff_ffff_bb3f_4a2c
brz v4, ebb5
jump ebb1
brz v4, block5
jump block1
ebb1:
block1:
v5 = iconst.i32 0
v6 = copy.i64 v3
v7 = copy.i64 v3
@@ -33,10 +33,10 @@ ebb1:
v14 = copy.i64 v3
v15 = copy.i64 v3
v16 = copy.i64 v3
brnz v5, ebb4(v2, v3, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
jump ebb2
brnz v5, block4(v2, v3, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
jump block2
ebb2:
block2:
v17 = iconst.i32 0
v18 = copy.i64 v3
v19 = copy.i64 v3
@@ -49,19 +49,19 @@ ebb2:
v26 = copy.i64 v3
v27 = copy.i64 v3
v28 = copy.i64 v3
brnz v17, ebb4(v2, v3, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28)
jump ebb3
brnz v17, block4(v2, v3, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28)
jump block3
ebb3:
jump ebb1
block3:
jump block1
ebb4(v29: i32, v30: i64, v31: i64, v32: i64, v33: i64, v34: i64, v35: i64, v36: i64, v37: i64, v38: i64, v39: i64, v40: i64, v41: i64):
jump ebb7(v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41)
block4(v29: i32, v30: i64, v31: i64, v32: i64, v33: i64, v34: i64, v35: i64, v36: i64, v37: i64, v38: i64, v39: i64, v40: i64, v41: i64):
jump block7(v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41)
ebb5:
jump ebb6
block5:
jump block6
ebb6:
block6:
v42 = copy.i64 v3
v43 = copy.i64 v3
v44 = copy.i64 v3
@@ -73,103 +73,103 @@ ebb6:
v50 = copy.i64 v3
v51 = copy.i64 v3
v52 = copy.i64 v3
jump ebb7(v2, v3, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52)
jump block7(v2, v3, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52)
ebb7(v53: i32, v54: i64, v55: i64, v56: i64, v57: i64, v58: i64, v59: i64, v60: i64, v61: i64, v62: i64, v63: i64, v64: i64, v65: i64):
block7(v53: i32, v54: i64, v55: i64, v56: i64, v57: i64, v58: i64, v59: i64, v60: i64, v61: i64, v62: i64, v63: i64, v64: i64, v65: i64):
v66 = iconst.i32 0
v67 = iconst.i32 0
v68 = icmp eq v66, v67
v69 = bint.i32 v68
jump ebb8
jump block8
ebb8:
jump ebb9
block8:
jump block9
ebb9:
block9:
v70 = iconst.i32 0xffff_ffff_ffff_912f
brz v70, ebb10
jump ebb35
brz v70, block10
jump block35
ebb10:
block10:
v71 = iconst.i32 0
brz v71, ebb11
jump ebb27
brz v71, block11
jump block27
ebb11:
jump ebb12
block11:
jump block12
ebb12:
jump ebb13
block12:
jump block13
ebb13:
jump ebb14
block13:
jump block14
ebb14:
jump ebb15
block14:
jump block15
ebb15:
jump ebb16
block15:
jump block16
ebb16:
jump ebb17
block16:
jump block17
ebb17:
jump ebb18
block17:
jump block18
ebb18:
jump ebb19
block18:
jump block19
ebb19:
jump ebb20
block19:
jump block20
ebb20:
jump ebb21
block20:
jump block21
ebb21:
jump ebb22
block21:
jump block22
ebb22:
jump ebb23
block22:
jump block23
ebb23:
jump ebb24
block23:
jump block24
ebb24:
jump ebb25
block24:
jump block25
ebb25:
jump ebb26
block25:
jump block26
ebb26:
jump ebb27
block26:
jump block27
ebb27:
jump ebb28
block27:
jump block28
ebb28:
jump ebb29
block28:
jump block29
ebb29:
jump ebb30
block29:
jump block30
ebb30:
jump ebb31
block30:
jump block31
ebb31:
jump ebb32
block31:
jump block32
ebb32:
jump ebb33
block32:
jump block33
ebb33:
jump ebb34
block33:
jump block34
ebb34:
jump ebb35
block34:
jump block35
ebb35:
jump ebb36
block35:
jump block36
ebb36:
block36:
trap user0
}

View File

@@ -24,8 +24,8 @@ function %pyramid(i32) -> i32 {
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
; not: spill_slot
ebb0(v1: i32):
; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
block0(v1: i32):
; check: block0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
@@ -71,7 +71,7 @@ ebb0(v1: i32):
; All values live across a call must be spilled
function %across_call(i32) {
fn0 = %foo(i32)
ebb0(v1: i32):
block0(v1: i32):
; check: v1 = spill
call fn0(v1)
; check: call fn0
@@ -84,7 +84,7 @@ ebb0(v1: i32):
; The same value used for two function arguments.
function %doubleuse(i32) {
fn0 = %xx(i32, i32)
ebb0(v0: i32):
block0(v0: i32):
; check: $(c=$V) = copy v0
call fn0(v0, v0)
; check: call fn0(v0, $c)
@@ -94,7 +94,7 @@ ebb0(v0: i32):
; The same value used as indirect callee and argument.
function %doubleuse_icall1(i32) {
sig0 = (i32) system_v
ebb0(v0: i32):
block0(v0: i32):
; not:copy
call_indirect sig0, v0(v0)
return
@@ -103,7 +103,7 @@ ebb0(v0: i32):
; The same value used as indirect callee and two arguments.
function %doubleuse_icall2(i32) {
sig0 = (i32, i32) system_v
ebb0(v0: i32):
block0(v0: i32):
; check: $(c=$V) = copy v0
call_indirect sig0, v0(v0, v0)
; check: call_indirect sig0, v0(v0, $c)
@@ -115,21 +115,21 @@ function %stackargs(i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss0 = incoming_arg 4
; check: ss1 = incoming_arg 4, offset 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
; unordered: fill v6
; unordered: fill v7
v10 = iadd v6, v7
return v10
}
; More EBB arguments than registers.
function %ebbargs(i32) -> i32 {
ebb0(v1: i32):
; More block arguments than registers.
function %blockargs(i32) -> i32 {
block0(v1: i32):
; check: v1 = spill
v2 = iconst.i32 1
jump ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
jump block1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32):
block1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32):
v22 = iadd v10, v11
v23 = iadd v22, v12
v24 = iadd v23, v13
@@ -145,18 +145,18 @@ ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17:
return v33
}
; Spilling an EBB argument to make room for a branch operand.
; Spilling a block argument to make room for a branch operand.
function %brargs(i32) -> i32 {
ebb0(v1: i32):
block0(v1: i32):
; check: v1 = spill
v2 = iconst.i32 1
brnz v1, ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
jump ebb2
brnz v1, block1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
jump block2
ebb2:
block2:
return v1
ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32):
block1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32):
v22 = iadd v10, v11
v23 = iadd v22, v12
v24 = iadd v23, v13
@@ -181,8 +181,8 @@ function %use_spilled_value(i32) -> i32 {
; check: ss0 = spill_slot 4
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
ebb0(v1: i32):
; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
block0(v1: i32):
; check: block0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill

View File

@@ -7,41 +7,41 @@ target x86_64 haswell
; This function contains unreachable blocks which trip up the register
; allocator if they don't get cleared out.
function %unreachable_blocks(i64 vmctx) -> i32 baldrdash_system_v {
ebb0(v0: i64):
block0(v0: i64):
v1 = iconst.i32 0
v2 = iconst.i32 0
jump ebb2
jump block2
ebb2:
jump ebb4
block2:
jump block4
ebb4:
jump ebb2
block4:
jump block2
; Everything below this point is unreachable.
ebb3(v3: i32):
block3(v3: i32):
v5 = iadd.i32 v2, v3
jump ebb6
jump block6
ebb6:
jump ebb6
block6:
jump block6
ebb7(v6: i32):
block7(v6: i32):
v7 = iadd.i32 v5, v6
jump ebb8
jump block8
ebb8:
jump ebb10
block8:
jump block10
ebb10:
jump ebb8
block10:
jump block8
ebb9(v8: i32):
block9(v8: i32):
v10 = iadd.i32 v7, v8
jump ebb1(v10)
jump block1(v10)
ebb1(v11: i32):
block1(v11: i32):
return v11
}

View File

@@ -2,48 +2,48 @@ test regalloc
target i686
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
; The value v9 appears both as the branch control and one of the EBB arguments
; in the brnz instruction in ebb2. It also happens that v7 and v9 are assigned
; The value v9 appears both as the branch control and one of the block arguments
; in the brnz instruction in block2. It also happens that v7 and v9 are assigned
; to the same register, so v9 doesn't need to be moved before the brnz.
;
; This ended up confusong the constraint solver which had not made a record of
; the fixed register assignment for v9 since it was already in the correct
; register.
function %pr147(i32) -> i32 system_v {
ebb0(v0: i32):
block0(v0: i32):
v1 = iconst.i32 0
v2 = iconst.i32 1
v3 = iconst.i32 0
jump ebb2(v3, v2, v0)
jump block2(v3, v2, v0)
; check: $(splitEdge=$EBB):
; check: jump ebb2($V, $V, v9)
; check: $(splitEdge=$BB):
; check: jump block2($V, $V, v9)
ebb2(v4: i32, v5: i32, v7: i32):
; check: ebb2
block2(v4: i32, v5: i32, v7: i32):
; check: block2
v6 = iadd v4, v5
v8 = iconst.i32 -1
; v7 is killed here and v9 gets the same register.
v9 = iadd v7, v8
; check: v9 = iadd v7, v8
; Here v9 the brnz control appears to interfere with v9 the EBB argument,
; Here v9 the brnz control appears to interfere with v9 the block argument,
; so divert_fixed_input_conflicts() calls add_var(v9), which is ok. The
; add_var sanity checks got confused when no fixed assignment could be
; found for v9.
;
; We should be able to handle this situation without making copies of v9.
brnz v9, ebb2(v5, v6, v9)
brnz v9, block2(v5, v6, v9)
; check: brnz v9, $splitEdge
jump ebb3
jump block3
ebb3:
block3:
return v5
}
function %select_i64(i64, i64, i32) -> i64 {
ebb0(v0: i64, v1: i64, v2: i32):
block0(v0: i64, v1: i64, v2: i32):
v3 = select v2, v0, v1
return v3
}