Remove support for entity variables in filecheck.

Now that the parser doesn't renumber indices, there's no need for entity
variables like $v0.
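
For example (directives taken from the test expectations in this commit), a
check that previously matched through entity variables:

    ; check: brnz $v0, $ebb1($cp1)

now names the value and EBB directly:

    ; check: brnz v0, ebb1($cp1)

Filecheck's ordinary match variables, such as $(cp1=$V), $tmp, and $REG, are
unaffected; only the entity variables that shadowed entity names ($v0, $ebb1,
$fn0, $sig0) are removed.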
Dan Gohman
2018-02-20 14:31:01 -08:00
parent a5b00b173e
commit 10dcfcacdb
29 changed files with 343 additions and 395 deletions

@@ -17,7 +17,7 @@ ebb0(v1: i32, v2: i32):
function %dead_arg(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
-; check: return $v1
+; check: return v1
return v1
}
@@ -25,8 +25,8 @@ ebb0(v1: i32, v2: i32):
function %move1(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
-; check: regmove $v2, %x11 -> %x10
-; nextln: return $v2
+; check: regmove v2, %x11 -> %x10
+; nextln: return v2
return v2
}
@@ -34,10 +34,10 @@ ebb0(v1: i32, v2: i32):
function %swap(i32, i32) -> i32, i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
-; check: regmove $v2, %x11 -> $(tmp=$RX)
-; nextln: regmove $v1, %x10 -> %x11
-; nextln: regmove $v2, $tmp -> %x10
-; nextln: return $v2, $v1
+; check: regmove v2, %x11 -> $(tmp=$RX)
+; nextln: regmove v1, %x10 -> %x11
+; nextln: regmove v2, $tmp -> %x10
+; nextln: return v2, v1
return v2, v1
}

@@ -23,8 +23,8 @@ ebb1(v10: i32):
function %trivial(i32) -> i32 {
ebb0(v0: i32):
-; check: $(cp1=$V) = copy $v0
-; nextln: brnz $v0, $ebb1($cp1)
+; check: $(cp1=$V) = copy v0
+; nextln: brnz v0, ebb1($cp1)
brnz v0, ebb1(v0)
; not: copy
v1 = iadd_imm v0, 7
@@ -39,8 +39,8 @@ ebb1(v10: i32):
; A value is used as an SSA argument twice in the same branch.
function %dualuse(i32) -> i32 {
ebb0(v0: i32):
-; check: $(cp1=$V) = copy $v0
-; nextln: brnz $v0, $ebb1($cp1, $v0)
+; check: $(cp1=$V) = copy v0
+; nextln: brnz v0, ebb1($cp1, v0)
brnz v0, ebb1(v0, v0)
v1 = iadd_imm v0, 7
v2 = iadd_imm v1, 56
@@ -55,15 +55,15 @@ ebb1(v10: i32, v11: i32):
; The interference can be broken with a copy at either branch.
function %interference(i32) -> i32 {
ebb0(v0: i32):
-; check: $(cp0=$V) = copy $v0
+; check: $(cp0=$V) = copy v0
; not: copy
-; check: brnz $v0, ebb1($cp0)
+; check: brnz v0, ebb1($cp0)
brnz v0, ebb1(v0)
v1 = iadd_imm v0, 7
; v1 and v0 interfere here:
v2 = iadd_imm v0, 8
; not: copy
-; check: jump $ebb1($v1)
+; check: jump ebb1(v1)
jump ebb1(v1)
ebb1(v10: i32):
@@ -81,13 +81,13 @@ ebb0(v0: i32):
ebb1(v10: i32, v11: i32):
; v11 needs to be isolated because it interferes with v10.
-; check: $ebb1($v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
-; check: $v11 = copy $nv11a
+; check: ebb1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
+; check: v11 = copy $nv11a
v12 = iadd v10, v11
v13 = icmp ult v12, v0
-; check: $(nv11b=$V) = copy $v11
+; check: $(nv11b=$V) = copy v11
; not: copy
-; check: brnz $v13, $ebb1($nv11b, $v12)
+; check: brnz v13, ebb1($nv11b, v12)
brnz v13, ebb1(v11, v12)
return v12
}

@@ -20,10 +20,10 @@ function %tied_alive() -> i32 {
ebb0:
v0 = iconst.i32 12
v1 = iconst.i32 13
-; check: $(v0c=$V) = copy $v0
-; check: $v2 = isub $v0c, $v1
+; check: $(v0c=$V) = copy v0
+; check: v2 = isub $v0c, v1
v2 = isub v0, v1
-; check: $v3 = iadd $v2, $v0
+; check: v3 = iadd v2, v0
v3 = iadd v2, v0
return v3
}
@@ -32,11 +32,11 @@ ebb0:
function %fixed_op() -> i32 {
ebb0:
; check: ,%rax]
-; sameln: $v0 = iconst.i32 12
+; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
-; check: regmove $v0, %rax -> %rcx
+; check: regmove v0, %rax -> %rcx
v2 = ishl v1, v0
return v2
}
@@ -45,14 +45,14 @@ ebb0:
function %fixed_op_twice() -> i32 {
ebb0:
; check: ,%rax]
-; sameln: $v0 = iconst.i32 12
+; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
-; check: regmove $v0, %rax -> %rcx
+; check: regmove v0, %rax -> %rcx
v2 = ishl v1, v0
-; check: regmove $v0, %rcx -> $REG
-; check: regmove $v2, $REG -> %rcx
+; check: regmove v0, %rcx -> $REG
+; check: regmove v2, $REG -> %rcx
v3 = ishl v0, v2
return v3
@@ -62,12 +62,12 @@ ebb0:
function %fixed_op_twice() -> i32 {
ebb0:
; check: ,%rax]
-; sameln: $v0 = iconst.i32 12
+; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
-; check: regmove $v0, %rax -> %rcx
-; check: $v2 = ishl $v1, $v0
+; check: regmove v0, %rax -> %rcx
+; check: v2 = ishl v1, v0
v2 = ishl v1, v0
; Now v0 is globally allocated to %rax, but diverted to %rcx.
@@ -77,6 +77,6 @@ ebb0:
; check: ,%rcx]
; sameln: isub
; Move it into place for the return value.
-; check: regmove $v3, %rcx -> %rax
+; check: regmove v3, %rcx -> %rax
return v3
}

@@ -19,12 +19,12 @@ ebb0(v0: i32):
jump ebb2(v3, v2, v0)
ebb2(v4: i32, v5: i32, v7: i32):
-; check: $ebb2
+; check: ebb2
v6 = iadd v4, v5
v8 = iconst.i32 -1
; v7 is killed here and v9 gets the same register.
v9 = iadd v7, v8
-; check: $v9 = iadd $v7, $v8
+; check: v9 = iadd v7, v8
; Here v9 the brnz control appears to interfere with v9 the EBB argument,
; so divert_fixed_input_conflicts() calls add_var(v9), which is ok. The
; add_var sanity checks got confused when no fixed assignment could be
@@ -32,7 +32,7 @@ ebb2(v4: i32, v5: i32, v7: i32):
;
; We should be able to handle this situation without making copies of v9.
brnz v9, ebb2(v5, v6, v9)
-; check: brnz $v9, $ebb2($V, $V, $v9)
+; check: brnz v9, ebb2($V, $V, v9)
jump ebb3
ebb3:

@@ -39,9 +39,9 @@ ebb1:
; flag so it can be reassigned to a different global register.
function %pr218(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] {
ebb0(v0: i64, v1: i64, v2: i64, v3: i64):
-; check: regmove $v3, %rcx ->
+; check: regmove v3, %rcx ->
v4 = ushr v0, v0
-; check: $v4 = copy
+; check: v4 = copy
jump ebb1
ebb1:

@@ -52,8 +52,8 @@ ebb6:
v25 = load.i64 v24
v8 = iadd v25, v23
v9 = load.i32 v8+56
-; check: $v9 = spill
-; check: brnz $V, $ebb3($v9)
+; check: v9 = spill
+; check: brnz $V, ebb3(v9)
brnz v9, ebb3(v9)
jump ebb4

@@ -9,11 +9,11 @@ function %spill_return() -> i32 {
ebb0:
v0 = call fn0()
-; check: $(reg=$V) = call $fn0
-; check: $v0 = spill $reg
+; check: $(reg=$V) = call fn0
+; check: v0 = spill $reg
v2 = call fn0()
-; check: $v2 = call $fn0
+; check: v2 = call fn0
return v0
-; check: $(reload=$V) = fill $v0
+; check: $(reload=$V) = fill v0
; check: return $reload
}

@@ -25,13 +25,13 @@ function %pyramid(i32) -> i32 {
; check: ss2 = spill_slot 4
; not: spill_slot
ebb0(v1: i32):
-; check: $ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
-; check: ,ss0]$WS $v1 = spill $rv1
+; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
+; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
-; nextln: ,ss2]$WS $v2 = spill $r1v2
+; nextln: ,ss2]$WS v2 = spill $r1v2
; not: spill
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
@@ -46,7 +46,7 @@ ebb0(v1: i32):
v13 = iadd_imm v12, 12
v14 = iadd_imm v13, 12
v33 = iadd v13, v14
-; check: iadd $v13
+; check: iadd v13
v32 = iadd v33, v12
v31 = iadd v32, v11
v30 = iadd v31, v10
@@ -58,26 +58,26 @@ ebb0(v1: i32):
v24 = iadd v25, v4
v23 = iadd v24, v3
v22 = iadd v23, v2
-; check: $(r2v2=$V) = fill $v2
-; check: $v22 = iadd $v23, $r2v2
+; check: $(r2v2=$V) = fill v2
+; check: v22 = iadd v23, $r2v2
v21 = iadd v22, v1
-; check: $(r2v1=$V) = fill $v1
-; check: $v21 = iadd $v22, $r2v1
+; check: $(r2v1=$V) = fill v1
+; check: v21 = iadd v22, $r2v1
; check: $(rlink2=$V) = fill $link
return v21
-; check: return $v21, $rlink2
+; check: return v21, $rlink2
}
; All values live across a call must be spilled
function %across_call(i32) {
fn0 = function %foo(i32)
ebb0(v1: i32):
-; check: $v1 = spill
+; check: v1 = spill
call fn0(v1)
-; check: call $fn0
+; check: call fn0
call fn0(v1)
-; check: fill $v1
-; check: call $fn0
+; check: fill v1
+; check: call fn0
return
}
@@ -85,9 +85,9 @@ ebb0(v1: i32):
function %doubleuse(i32) {
fn0 = function %xx(i32, i32)
ebb0(v0: i32):
-; check: $(c=$V) = copy $v0
+; check: $(c=$V) = copy v0
call fn0(v0, v0)
-; check: call $fn0($v0, $c)
+; check: call fn0(v0, $c)
return
}
@@ -104,9 +104,9 @@ ebb0(v0: i32):
function %doubleuse_icall2(i32) {
sig0 = (i32, i32) native
ebb0(v0: i32):
-; check: $(c=$V) = copy $v0
+; check: $(c=$V) = copy v0
call_indirect sig0, v0(v0, v0)
-; check: call_indirect $sig0, $v0($v0, $c)
+; check: call_indirect sig0, v0(v0, $c)
return
}
@@ -116,8 +116,8 @@ function %stackargs(i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss1 = incoming_arg 4, offset 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
-; unordered: fill $v6
-; unordered: fill $v7
+; unordered: fill v6
+; unordered: fill v7
v10 = iadd v6, v7
return v10
}
@@ -125,7 +125,7 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
; More EBB arguments than registers.
function %ebbargs(i32) -> i32 {
ebb0(v1: i32):
-; check: $v1 = spill
+; check: v1 = spill
v2 = iconst.i32 1
jump ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
@@ -148,7 +148,7 @@ ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17:
; Spilling an EBB argument to make room for a branch operand.
function %brargs(i32) -> i32 {
ebb0(v1: i32):
-; check: $v1 = spill
+; check: v1 = spill
v2 = iconst.i32 1
brnz v1, ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
return v1
@@ -179,13 +179,13 @@ function %use_spilled_value(i32) -> i32 {
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
ebb0(v1: i32):
-; check: $ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
-; check: ,ss0]$WS $v1 = spill $rv1
+; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
+; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
-; nextln: ,ss2]$WS $v2 = spill $r1v2
+; nextln: ,ss2]$WS v2 = spill $r1v2
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
v5 = iadd_imm v4, 12