Remove support for entity variables in filecheck.

Now that the parser doesn't renumber indices, there's no need for entity
variables like $v0.
Dan Gohman
2018-02-20 14:31:01 -08:00
parent a5b00b173e
commit 10dcfcacdb
29 changed files with 343 additions and 395 deletions
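As a minimal before/after sketch (taken verbatim from the first hunk below): a check that previously had to use entity variables to match renumbered entities,

; check: $ebb0($v1: i32
; nextln: $(f=$V) = ifcmp_imm $v1, 0

can now name the entities directly, while pattern variables bound with $(name=$V) are unaffected:

; check: ebb0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0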

View File

@@ -11,8 +11,8 @@ function %cond_trap(i32) {
ebb0(v1: i32):
trapz v1, user67
return
-; check: $ebb0($v1: i32
-; nextln: $(f=$V) = ifcmp_imm $v1, 0
+; check: ebb0(v1: i32
+; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $f, user67
; nextln: return
}
@@ -21,8 +21,8 @@ function %cond_trap2(i32) {
ebb0(v1: i32):
trapnz v1, int_ovf
return
-; check: $ebb0($v1: i32
-; nextln: $(f=$V) = ifcmp_imm $v1, 0
+; check: ebb0(v1: i32
+; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif ne $f, int_ovf
; nextln: return
}
@@ -32,8 +32,8 @@ ebb0(v1: i32):
v2 = icmp_imm eq v1, 6
trapz v2, user7
return
-; check: $ebb0($v1: i32
-; check: brnz $v2, $(new=$EBB)
+; check: ebb0(v1: i32
+; check: brnz v2, $(new=$EBB)
; nextln: trap user7
; check: $new:
; nextln: return
@@ -44,8 +44,8 @@ ebb0(v1: i32):
v2 = icmp_imm eq v1, 6
trapnz v2, user9
return
-; check: $ebb0($v1: i32
-; check: brz $v2, $(new=$EBB)
+; check: ebb0(v1: i32
+; check: brz v2, $(new=$EBB)
; nextln: trap user9
; check: $new:
; nextln: return
@@ -55,7 +55,7 @@ function %f32const() -> f32 {
ebb0:
v1 = f32const 0x1.0p1
; check: $(tmp=$V) = iconst.i32
-; check: $v1 = bitcast.f32 $tmp
+; check: v1 = bitcast.f32 $tmp
return v1
}
@@ -63,17 +63,17 @@ function %f64const() -> f64 {
ebb0:
v1 = f64const 0x1.0p1
; check: $(tmp=$V) = iconst.i64
-; check: $v1 = bitcast.f64 $tmp
+; check: v1 = bitcast.f64 $tmp
return v1
}
function %select_f64(f64, f64, i32) -> f64 {
ebb0(v0: f64, v1: f64, v2: i32):
v3 = select v2, v0, v1
-; check: brnz v2, $(new=$EBB)($v0)
-; nextln: jump $new($v1)
-; check: $new($v3: f64):
-; nextln: return $v3
+; check: brnz v2, $(new=$EBB)(v0)
+; nextln: jump $new(v1)
+; check: $new(v3: f64):
+; nextln: return v3
return v3
}
@@ -81,17 +81,17 @@ function %f32_min(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
-; check: $(vnat=$V) = x86_fmin $v0, $v1
+; check: $(vnat=$V) = x86_fmin v0, v1
; nextln: jump $(done=$EBB)($vnat)
; check: $(uno=$EBB):
-; nextln: $(vuno=$V) = fadd.f32 $v0, $v1
+; nextln: $(vuno=$V) = fadd.f32 v0, v1
; nextln: jump $(done=$EBB)($vuno)
; check: $(ueq=$EBB):
-; check: $(veq=$V) = bor.f32 $v0, $v1
+; check: $(veq=$V) = bor.f32 v0, v1
; nextln: jump $(done=$EBB)($veq)
-; check: $done($v2: f32):
-; nextln: return $v2
+; check: $done(v2: f32):
+; nextln: return v2
}

View File

@@ -10,63 +10,63 @@ isa intel
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = udiv v0, v1
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = urem v0, v1
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = sdiv v0, v1
-; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
+; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; check: $(hi=$V) = sshr
-; nextln: $(q=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
-; nextln: $(fm=$V) = ifcmp.i64 $v0, $imin
+; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
; nextln: trapif eq $fm, int_ovf
-; check: $done($v2: i64):
+; check: $done(v2: i64):
return v2
-; nextln: return $v2
+; nextln: return v2
}
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = srem v0, v1
-; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
+; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; check: $(hi=$V) = sshr
-; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
-; check: $done($v2: i64):
+; check: $done(v2: i64):
return v2
-; nextln: return $v2
+; nextln: return v2
}

View File

@@ -10,30 +10,30 @@ isa intel
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = udiv v0, v1
; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = urem v0, v1
; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = sdiv v0, v1
; check: $(hi=$V) = sshr
-; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
return v2
; nextln: return $d
}
@@ -42,17 +42,17 @@ ebb0(v0: i64, v1: i64):
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
v2 = srem v0, v1
-; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
+; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; check: $(hi=$V) = sshr
-; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
-; check: $done($v2: i64):
+; check: $done(v2: i64):
return v2
-; nextln: return $v2
+; nextln: return v2
}

View File

@@ -12,4 +12,4 @@ ebb0(v0: f32):
; check: function %floor(f32 [%xmm0]) -> f32 [%xmm0] native {
; check: sig0 = (f32) -> f32 native
; check: fn0 = sig0 %FloorF32
-; check: $v1 = call fn0($v0)
+; check: v1 = call fn0(v0)

View File

@@ -11,9 +11,9 @@ function %vmctx(i64 vmctx) -> i64 {
ebb1(v1: i64):
v2 = global_addr.i64 gv1
-; check: $v2 = iadd_imm $v1, -16
+; check: v2 = iadd_imm v1, -16
return v2
-; check: return $v2
+; check: return v2
}
function %deref(i64 vmctx) -> i64 {
@@ -22,11 +22,11 @@ function %deref(i64 vmctx) -> i64 {
ebb1(v1: i64):
v2 = global_addr.i64 gv2
-; check: $(a1=$V) = iadd_imm $v1, -16
+; check: $(a1=$V) = iadd_imm v1, -16
; check: $(p1=$V) = load.i64 $a1
-; check: $v2 = iadd_imm $p1, 32
+; check: v2 = iadd_imm $p1, 32
return v2
-; check: return $v2
+; check: return v2
}
function %sym() -> i64 {
@@ -35,9 +35,9 @@ function %sym() -> i64 {
ebb1:
v0 = global_addr.i64 gv0
-; check: $v0 = globalsym_addr.i64 gv0
+; check: v0 = globalsym_addr.i64 gv0
v1 = global_addr.i64 gv1
-; check: $v1 = globalsym_addr.i64 gv1
+; check: v1 = globalsym_addr.i64 gv1
v2 = bxor v0, v1
return v2
}
@@ -49,18 +49,18 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
-; check: $ebb0(
+; check: ebb0(
v1 = heap_addr.i64 heap0, v0, 1
; Boundscheck should be eliminated.
; Checks here are assuming that no pipehole opts fold the load offsets.
-; nextln: $(xoff=$V) = uextend.i64 $v0
-; nextln: $(haddr=$V) = iadd_imm $v999, 64
+; nextln: $(xoff=$V) = uextend.i64 v0
+; nextln: $(haddr=$V) = iadd_imm v999, 64
; nextln: $(hbase=$V) = load.i64 $haddr
-; nextln: $v1 = iadd $hbase, $xoff
+; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+16
-; nextln: $v2 = load.f32 $v1+16
+; nextln: v2 = load.f32 v1+16
v3 = load.f32 v1+20
-; nextln: $v3 = load.f32 $v1+20
+; nextln: v3 = load.f32 v1+20
v4 = fadd v2, v3
return v4
}
@@ -73,7 +73,7 @@ ebb0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
; the `trap heap_oob` instruction as the terminator of the Ebb and moving
; the remainder of the instructions into an inaccessible Ebb.
-; check: $ebb0(
+; check: ebb0(
; nextln: trap heap_oob
; check: ebb1:
; nextln: v1 = iconst.i64 0
@@ -93,7 +93,7 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
-; check: $ebb0(
+; check: ebb0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Boundscheck code
; check: $(oob=$V) = icmp
@@ -101,12 +101,12 @@ ebb0(v0: i32, v999: i64):
; nextln: trap heap_oob
; check: $ok:
; Checks here are assuming that no pipehole opts fold the load offsets.
-; nextln: $(xoff=$V) = uextend.i64 $v0
-; nextln: $(haddr=$V) = iadd_imm.i64 $v999, 64
+; nextln: $(xoff=$V) = uextend.i64 v0
+; nextln: $(haddr=$V) = iadd_imm.i64 v999, 64
; nextln: $(hbase=$V) = load.i64 $haddr
-; nextln: $v1 = iadd $hbase, $xoff
+; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+0x7fff_ffff
-; nextln: $v2 = load.f32 $v1+0x7fff_ffff
+; nextln: v2 = load.f32 v1+0x7fff_ffff
return v2
}
@@ -116,7 +116,7 @@ function %stkchk(i64 vmctx) spiderwasm {
gv0 = vmctx+64
ebb0(v0: i64):
-; check: $ebb0(
+; check: ebb0(
stack_check gv0
; check: $(limit=$V) = load.i64 notrap aligned
; check: $(flags=$V) = ifcmp_sp $limit