Remove support for entity variables in filecheck.
Now that the parser doesn't renumber indices, there's no need for entity variables like $v0.
This commit is contained in:
@@ -11,9 +11,9 @@ function %vmctx(i64 vmctx) -> i64 {
 
 ebb1(v1: i64):
 v2 = global_addr.i64 gv1
-; check: $v2 = iadd_imm $v1, -16
+; check: v2 = iadd_imm v1, -16
 return v2
-; check: return $v2
+; check: return v2
 }
 
 function %deref(i64 vmctx) -> i64 {
@@ -22,11 +22,11 @@ function %deref(i64 vmctx) -> i64 {
 
 ebb1(v1: i64):
 v2 = global_addr.i64 gv2
-; check: $(a1=$V) = iadd_imm $v1, -16
+; check: $(a1=$V) = iadd_imm v1, -16
 ; check: $(p1=$V) = load.i64 $a1
-; check: $v2 = iadd_imm $p1, 32
+; check: v2 = iadd_imm $p1, 32
 return v2
-; check: return $v2
+; check: return v2
 }
 
 function %sym() -> i64 {
@@ -35,9 +35,9 @@ function %sym() -> i64 {
 
 ebb1:
 v0 = global_addr.i64 gv0
-; check: $v0 = globalsym_addr.i64 gv0
+; check: v0 = globalsym_addr.i64 gv0
 v1 = global_addr.i64 gv1
-; check: $v1 = globalsym_addr.i64 gv1
+; check: v1 = globalsym_addr.i64 gv1
 v2 = bxor v0, v1
 return v2
 }
@@ -49,18 +49,18 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
 heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
 
 ebb0(v0: i32, v999: i64):
-; check: $ebb0(
+; check: ebb0(
 v1 = heap_addr.i64 heap0, v0, 1
 ; Boundscheck should be eliminated.
 ; Checks here are assuming that no pipehole opts fold the load offsets.
-; nextln: $(xoff=$V) = uextend.i64 $v0
-; nextln: $(haddr=$V) = iadd_imm $v999, 64
+; nextln: $(xoff=$V) = uextend.i64 v0
+; nextln: $(haddr=$V) = iadd_imm v999, 64
 ; nextln: $(hbase=$V) = load.i64 $haddr
-; nextln: $v1 = iadd $hbase, $xoff
+; nextln: v1 = iadd $hbase, $xoff
 v2 = load.f32 v1+16
-; nextln: $v2 = load.f32 $v1+16
+; nextln: v2 = load.f32 v1+16
 v3 = load.f32 v1+20
-; nextln: $v3 = load.f32 $v1+20
+; nextln: v3 = load.f32 v1+20
 v4 = fadd v2, v3
 return v4
 }
@@ -73,7 +73,7 @@ ebb0(v0: i32, v999: i64):
 ; Everything after the obviously OOB access should be eliminated, leaving
 ; the `trap heap_oob` instruction as the terminator of the Ebb and moving
 ; the remainder of the instructions into an inaccessible Ebb.
-; check: $ebb0(
+; check: ebb0(
 ; nextln: trap heap_oob
 ; check: ebb1:
 ; nextln: v1 = iconst.i64 0
@@ -93,7 +93,7 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
 heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
 
 ebb0(v0: i32, v999: i64):
-; check: $ebb0(
+; check: ebb0(
 v1 = heap_addr.i64 heap0, v0, 0x8000_0000
 ; Boundscheck code
 ; check: $(oob=$V) = icmp
@@ -101,12 +101,12 @@ ebb0(v0: i32, v999: i64):
 ; nextln: trap heap_oob
 ; check: $ok:
 ; Checks here are assuming that no pipehole opts fold the load offsets.
-; nextln: $(xoff=$V) = uextend.i64 $v0
-; nextln: $(haddr=$V) = iadd_imm.i64 $v999, 64
+; nextln: $(xoff=$V) = uextend.i64 v0
+; nextln: $(haddr=$V) = iadd_imm.i64 v999, 64
 ; nextln: $(hbase=$V) = load.i64 $haddr
-; nextln: $v1 = iadd $hbase, $xoff
+; nextln: v1 = iadd $hbase, $xoff
 v2 = load.f32 v1+0x7fff_ffff
-; nextln: $v2 = load.f32 $v1+0x7fff_ffff
+; nextln: v2 = load.f32 v1+0x7fff_ffff
 return v2
 }
 
@@ -116,7 +116,7 @@ function %stkchk(i64 vmctx) spiderwasm {
 gv0 = vmctx+64
 
 ebb0(v0: i64):
-; check: $ebb0(
+; check: ebb0(
 stack_check gv0
 ; check: $(limit=$V) = load.i64 notrap aligned
 ; check: $(flags=$V) = ifcmp_sp $limit
|
||||
Reference in New Issue
Block a user