Remove support for entity variables in filecheck.

Now that the parser doesn't renumber indices, there's no need for entity
variables like $v0.
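
In practice, test directives simply drop the $ sigil from entity
references. A check that previously went through the parser's source
map, for example:

    ; check: $v3 = iadd $v1, $v2

now names the entities in the printed output directly:

    ; check: v3 = iadd v1, v2

Match variables defined by the directives themselves, such as
$(tmp=$V) or $(done=$EBB), are unaffected; only the predefined
source-map variables go away.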
Dan Gohman
2018-02-20 14:31:01 -08:00
parent a5b00b173e
commit 10dcfcacdb
29 changed files with 343 additions and 395 deletions


@@ -160,20 +160,6 @@ directives in the test file. LLVM's :command:`FileCheck` command has a
``CHECK-LABEL:`` directive to help separate the output from different functions.
Cretonne's tests don't need this.
Filecheck variables
~~~~~~~~~~~~~~~~~~~
Cretonne's IL parser causes entities like values and EBBs to be renumbered. It
maintains a source mapping to resolve references in the text, but when a
function is written out as text as part of a test, all of the entities have the
new numbers. This can complicate the filecheck directives since they need to
refer to the new entity numbers, not the ones in the adjacent source text.
To help with this, the parser's source-to-entity mapping is made available as
predefined filecheck variables. A value by the source name ``v10`` can be
referenced as the filecheck variable ``$v10``. The variable expands to the
renumbered entity name.
`test cat`
----------
@@ -192,26 +178,9 @@ Example::
}
; sameln: function %r1() -> i32, f32 {
; nextln: ebb0:
; nextln: v0 = iconst.i32 3
; nextln: v1 = f32const 0.0
; nextln: return v0, v1
; nextln: }
Notice that the values ``v10`` and ``v20`` in the source were renumbered to
``v0`` and ``v1`` respectively during parsing. The equivalent test using
filecheck variables would be::
function %r1() -> i32, f32 {
ebb1:
v10 = iconst.i32 3
v20 = f32const 0.0
return v10, v20
}
; sameln: function %r1() -> i32, f32 {
; nextln: ebb0:
; nextln: $v10 = iconst.i32 3
; nextln: $v20 = f32const 0.0
; nextln: return $v10, $v20
; nextln: v10 = iconst.i32 3
; nextln: v20 = f32const 0.0
; nextln: return v10, v20
; nextln: }
`test verifier`


@@ -11,8 +11,8 @@ function %cond_trap(i32) {
ebb0(v1: i32):
trapz v1, user67
return
; check: $ebb0($v1: i32
; nextln: $(f=$V) = ifcmp_imm $v1, 0
; check: ebb0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $f, user67
; nextln: return
}
@@ -21,8 +21,8 @@ function %cond_trap2(i32) {
ebb0(v1: i32):
trapnz v1, int_ovf
return
; check: $ebb0($v1: i32
; nextln: $(f=$V) = ifcmp_imm $v1, 0
; check: ebb0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif ne $f, int_ovf
; nextln: return
}
@@ -32,8 +32,8 @@ ebb0(v1: i32):
v2 = icmp_imm eq v1, 6
trapz v2, user7
return
; check: $ebb0($v1: i32
; check: brnz $v2, $(new=$EBB)
; check: ebb0(v1: i32
; check: brnz v2, $(new=$EBB)
; nextln: trap user7
; check: $new:
; nextln: return
@@ -44,8 +44,8 @@ ebb0(v1: i32):
v2 = icmp_imm eq v1, 6
trapnz v2, user9
return
; check: $ebb0($v1: i32
; check: brz $v2, $(new=$EBB)
; check: ebb0(v1: i32
; check: brz v2, $(new=$EBB)
; nextln: trap user9
; check: $new:
; nextln: return
@@ -55,7 +55,7 @@ function %f32const() -> f32 {
ebb0:
v1 = f32const 0x1.0p1
; check: $(tmp=$V) = iconst.i32
; check: $v1 = bitcast.f32 $tmp
; check: v1 = bitcast.f32 $tmp
return v1
}
@@ -63,17 +63,17 @@ function %f64const() -> f64 {
ebb0:
v1 = f64const 0x1.0p1
; check: $(tmp=$V) = iconst.i64
; check: $v1 = bitcast.f64 $tmp
; check: v1 = bitcast.f64 $tmp
return v1
}
function %select_f64(f64, f64, i32) -> f64 {
ebb0(v0: f64, v1: f64, v2: i32):
v3 = select v2, v0, v1
; check: brnz v2, $(new=$EBB)($v0)
; nextln: jump $new($v1)
; check: $new($v3: f64):
; nextln: return $v3
; check: brnz v2, $(new=$EBB)(v0)
; nextln: jump $new(v1)
; check: $new(v3: f64):
; nextln: return v3
return v3
}
@@ -81,17 +81,17 @@ function %f32_min(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
; check: $(vnat=$V) = x86_fmin $v0, $v1
; check: $(vnat=$V) = x86_fmin v0, v1
; nextln: jump $(done=$EBB)($vnat)
; check: $(uno=$EBB):
; nextln: $(vuno=$V) = fadd.f32 $v0, $v1
; nextln: $(vuno=$V) = fadd.f32 v0, v1
; nextln: jump $(done=$EBB)($vuno)
; check: $(ueq=$EBB):
; check: $(veq=$V) = bor.f32 $v0, $v1
; check: $(veq=$V) = bor.f32 v0, v1
; nextln: jump $(done=$EBB)($veq)
; check: $done($v2: f32):
; nextln: return $v2
; check: $done(v2: f32):
; nextln: return v2
}


@@ -10,63 +10,63 @@ isa intel
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = udiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm $v1, 0
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = urem v0, v1
; nextln: $(fz=$V) = ifcmp_imm $v1, 0
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = sdiv v0, v1
; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: $(fz=$V) = ifcmp_imm $v1, 0
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; check: $(hi=$V) = sshr
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
; nextln: $(fm=$V) = ifcmp.i64 $v0, $imin
; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
; nextln: trapif eq $fm, int_ovf
; check: $done($v2: i64):
; check: $done(v2: i64):
return v2
; nextln: return $v2
; nextln: return v2
}
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; check: $(hi=$V) = sshr
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
; check: $done($v2: i64):
; check: $done(v2: i64):
return v2
; nextln: return $v2
; nextln: return v2
}


@@ -10,30 +10,30 @@ isa intel
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = udiv v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = urem v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = sdiv v0, v1
; check: $(hi=$V) = sshr
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
return v2
; nextln: return $d
}
@@ -42,17 +42,17 @@ ebb0(v0: i64, v1: i64):
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: $ebb0(
; check: ebb0(
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; check: $(hi=$V) = sshr
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
; check: $done($v2: i64):
; check: $done(v2: i64):
return v2
; nextln: return $v2
; nextln: return v2
}


@@ -12,4 +12,4 @@ ebb0(v0: f32):
; check: function %floor(f32 [%xmm0]) -> f32 [%xmm0] native {
; check: sig0 = (f32) -> f32 native
; check: fn0 = sig0 %FloorF32
; check: $v1 = call fn0($v0)
; check: v1 = call fn0(v0)


@@ -11,9 +11,9 @@ function %vmctx(i64 vmctx) -> i64 {
ebb1(v1: i64):
v2 = global_addr.i64 gv1
; check: $v2 = iadd_imm $v1, -16
; check: v2 = iadd_imm v1, -16
return v2
; check: return $v2
; check: return v2
}
function %deref(i64 vmctx) -> i64 {
@@ -22,11 +22,11 @@ function %deref(i64 vmctx) -> i64 {
ebb1(v1: i64):
v2 = global_addr.i64 gv2
; check: $(a1=$V) = iadd_imm $v1, -16
; check: $(a1=$V) = iadd_imm v1, -16
; check: $(p1=$V) = load.i64 $a1
; check: $v2 = iadd_imm $p1, 32
; check: v2 = iadd_imm $p1, 32
return v2
; check: return $v2
; check: return v2
}
function %sym() -> i64 {
@@ -35,9 +35,9 @@ function %sym() -> i64 {
ebb1:
v0 = global_addr.i64 gv0
; check: $v0 = globalsym_addr.i64 gv0
; check: v0 = globalsym_addr.i64 gv0
v1 = global_addr.i64 gv1
; check: $v1 = globalsym_addr.i64 gv1
; check: v1 = globalsym_addr.i64 gv1
v2 = bxor v0, v1
return v2
}
@@ -49,18 +49,18 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: $ebb0(
; check: ebb0(
v1 = heap_addr.i64 heap0, v0, 1
; Boundscheck should be eliminated.
; Checks here assume that no peephole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 $v0
; nextln: $(haddr=$V) = iadd_imm $v999, 64
; nextln: $(xoff=$V) = uextend.i64 v0
; nextln: $(haddr=$V) = iadd_imm v999, 64
; nextln: $(hbase=$V) = load.i64 $haddr
; nextln: $v1 = iadd $hbase, $xoff
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+16
; nextln: $v2 = load.f32 $v1+16
; nextln: v2 = load.f32 v1+16
v3 = load.f32 v1+20
; nextln: $v3 = load.f32 $v1+20
; nextln: v3 = load.f32 v1+20
v4 = fadd v2, v3
return v4
}
@@ -73,7 +73,7 @@ ebb0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
; the `trap heap_oob` instruction as the terminator of the Ebb and moving
; the remainder of the instructions into an inaccessible Ebb.
; check: $ebb0(
; check: ebb0(
; nextln: trap heap_oob
; check: ebb1:
; nextln: v1 = iconst.i64 0
@@ -93,7 +93,7 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: $ebb0(
; check: ebb0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Boundscheck code
; check: $(oob=$V) = icmp
@@ -101,12 +101,12 @@ ebb0(v0: i32, v999: i64):
; nextln: trap heap_oob
; check: $ok:
; Checks here assume that no peephole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 $v0
; nextln: $(haddr=$V) = iadd_imm.i64 $v999, 64
; nextln: $(xoff=$V) = uextend.i64 v0
; nextln: $(haddr=$V) = iadd_imm.i64 v999, 64
; nextln: $(hbase=$V) = load.i64 $haddr
; nextln: $v1 = iadd $hbase, $xoff
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+0x7fff_ffff
; nextln: $v2 = load.f32 $v1+0x7fff_ffff
; nextln: v2 = load.f32 v1+0x7fff_ffff
return v2
}
@@ -116,7 +116,7 @@ function %stkchk(i64 vmctx) spiderwasm {
gv0 = vmctx+64
ebb0(v0: i64):
; check: $ebb0(
; check: ebb0(
stack_check gv0
; check: $(limit=$V) = load.i64 notrap aligned
; check: $(flags=$V) = ifcmp_sp $limit


@@ -5,15 +5,15 @@ function %int32(i32, i32) {
ebb0(v1: i32, v2: i32):
v10 = iadd v1, v2
; check: [R#0c]
; sameln: $v10 = iadd
; sameln: v10 = iadd
v11 = isub v1, v2
; check: [R#200c]
; sameln: $v11 = isub
; sameln: v11 = isub
v12 = imul v1, v2
; check: [R#10c]
; sameln: $v12 = imul
; sameln: v12 = imul
return
; check: [Iret#19]


@@ -14,9 +14,9 @@ ebb0(v1: i32, v2: i32):
v3, v4 = iadd_cout v1, v2
return v3, v4
}
; check: $v3 = iadd $v1, $v2
; check: $v4 = icmp ult $v3, $v1
; check: return $v3, $v4
; check: v3 = iadd v1, v2
; check: v4 = icmp ult v3, v1
; check: return v3, v4
; Expanding illegal immediate constants.
; Note that at some point we'll probably expand the iconst as well.
@@ -26,8 +26,8 @@ ebb0(v0: i32):
return v1
}
; check: $(cst=$V) = iconst.i32 0x3b9a_ca00
; check: $v1 = iadd $v0, $cst
; check: return $v1
; check: v1 = iadd v0, $cst
; check: return v1
function %bitclear(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):


@@ -8,10 +8,10 @@ isa riscv
function %int_split_args(i64) -> i64 {
ebb0(v0: i64):
; check: $ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: $v0 = iconcat $v0l, $v0h
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: v0 = iconcat $v0l, $v0h
v1 = iadd_imm v0, 1
; check: $(v1l=$V), $(v1h=$V) = isplit $v1
; check: $(v1l=$V), $(v1h=$V) = isplit v1
; check: return $v1l, $v1h, $link
return v1
}
@@ -22,10 +22,10 @@ function %split_call_arg(i32) {
ebb0(v0: i32):
v1 = uextend.i64 v0
call fn1(v1)
; check: $(v1l=$V), $(v1h=$V) = isplit $v1
; check: call $fn1($v1l, $v1h)
; check: $(v1l=$V), $(v1h=$V) = isplit v1
; check: call fn1($v1l, $v1h)
call fn2(v0, v1)
; check: call $fn2($v0, $V, $V)
; check: call fn2(v0, $V, $V)
return
}
@@ -33,11 +33,11 @@ function %split_ret_val() {
fn1 = function %foo() -> i64
ebb0:
v1 = call fn1()
; check: $ebb0($(link=$V): i32):
; nextln: $(v1l=$V), $(v1h=$V) = call $fn1()
; check: $v1 = iconcat $v1l, $v1h
; check: ebb0($(link=$V): i32):
; nextln: $(v1l=$V), $(v1h=$V) = call fn1()
; check: v1 = iconcat $v1l, $v1h
jump ebb1(v1)
; check: jump $ebb1($v1)
; check: jump ebb1(v1)
ebb1(v10: i64):
jump ebb1(v10)
@@ -48,11 +48,11 @@ function %split_ret_val2() {
fn1 = function %foo() -> i32, i64
ebb0:
v1, v2 = call fn1()
; check: $ebb0($(link=$V): i32):
; nextln: $v1, $(v2l=$V), $(v2h=$V) = call $fn1()
; check: $v2 = iconcat $v2l, $v2h
; check: ebb0($(link=$V): i32):
; nextln: v1, $(v2l=$V), $(v2h=$V) = call fn1()
; check: v2 = iconcat $v2l, $v2h
jump ebb1(v1, v2)
; check: jump $ebb1($v1, $v2)
; check: jump ebb1(v1, v2)
ebb1(v9: i32, v10: i64):
jump ebb1(v9, v10)
@@ -60,10 +60,10 @@ ebb1(v9: i32, v10: i64):
function %int_ext(i8, i8 sext, i8 uext) -> i8 uext {
ebb0(v1: i8, v2: i8, v3: i8):
; check: $ebb0($v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
; check: $v2 = ireduce.i8 $v2x
; check: $v3 = ireduce.i8 $v3x
; check: $(v1x=$V) = uextend.i32 $v1
; check: ebb0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
; check: v2 = ireduce.i8 $v2x
; check: v3 = ireduce.i8 $v3x
; check: $(v1x=$V) = uextend.i32 v1
; check: return $v1x, $link
return v1
}
@@ -73,11 +73,11 @@ function %ext_ret_val() {
fn1 = function %foo() -> i8 sext
ebb0:
v1 = call fn1()
; check: $ebb0($V: i32):
; nextln: $(rv=$V) = call $fn1()
; check: $v1 = ireduce.i8 $rv
; check: ebb0($V: i32):
; nextln: $(rv=$V) = call fn1()
; check: v1 = ireduce.i8 $rv
jump ebb1(v1)
; check: jump $ebb1($v1)
; check: jump ebb1(v1)
ebb1(v10: i8):
jump ebb1(v10)
@@ -85,16 +85,16 @@ ebb1(v10: i8):
function %vector_split_args(i64x4) -> i64x4 {
ebb0(v0: i64x4):
; check: $ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
; check: ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
; check: $(v0a=$V) = iconcat $v0al, $v0ah
; check: $(v0b=$V) = iconcat $v0bl, $v0bh
; check: $(v0ab=$V) = vconcat $v0a, $v0b
; check: $(v0c=$V) = iconcat $v0cl, $v0ch
; check: $(v0d=$V) = iconcat $v0dl, $v0dh
; check: $(v0cd=$V) = vconcat $v0c, $v0d
; check: $v0 = vconcat $v0ab, $v0cd
; check: v0 = vconcat $v0ab, $v0cd
v1 = bxor v0, v0
; check: $(v1ab=$V), $(v1cd=$V) = vsplit $v1
; check: $(v1ab=$V), $(v1cd=$V) = vsplit v1
; check: $(v1a=$V), $(v1b=$V) = vsplit $v1ab
; check: $(v1al=$V), $(v1ah=$V) = isplit $v1a
; check: $(v1bl=$V), $(v1bh=$V) = isplit $v1b
@@ -117,7 +117,7 @@ function %indirect_arg(i32, f32x2) {
sig1 = (f32x2) native
ebb0(v0: i32, v1: f32x2):
call_indirect sig1, v0(v1)
; check: call_indirect $sig1, $v0($V, $V)
; check: call_indirect sig1, v0($V, $V)
return
}
@@ -128,7 +128,7 @@ function %stack_args(i32) {
ebb0(v0: i32):
v1 = iconst.i64 1
call fn1(v1, v1, v1, v1, v0)
; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill $v0
; check: call $fn1($(=.*), $v0s)
; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill v0
; check: call fn1($(=.*), $v0s)
return
}


@@ -9,12 +9,12 @@ ebb0(v1: i64, v2: i64):
v3 = band v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#ec
; sameln: $(v3l=$V) = band $v1l, $v2l
; check: [R#ec
; sameln: $(v3h=$V) = band $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %bitwise_or(i64, i64) -> i64 {
@@ -22,12 +22,12 @@ ebb0(v1: i64, v2: i64):
v3 = bor v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#cc
; sameln: $(v3l=$V) = bor $v1l, $v2l
; check: [R#cc
; sameln: $(v3h=$V) = bor $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %bitwise_xor(i64, i64) -> i64 {
@@ -35,12 +35,12 @@ ebb0(v1: i64, v2: i64):
v3 = bxor v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#8c
; sameln: $(v3l=$V) = bxor $v1l, $v2l
; check: [R#8c
; sameln: $(v3h=$V) = bxor $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %arith_add(i64, i64) -> i64 {
@@ -51,7 +51,7 @@ ebb0(v1: i64, v2: i64):
v3 = iadd v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#0c
; sameln: $(v3l=$V) = iadd $v1l, $v2l
; check: $(c=$V) = icmp ult $v3l, $v1l
@@ -60,5 +60,5 @@ ebb0(v1: i64, v2: i64):
; check: $(c_int=$V) = bint.i32 $c
; check: [R#0c
; sameln: $(v3h=$V) = iadd $v3h1, $c_int
; check: $v3 = iconcat $v3l, $v3h
; check: v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link


@@ -6,12 +6,12 @@ isa riscv
function %simple(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump $ebb1($v1l, $v1h)
; check: jump ebb1($v1l, $v1h)
ebb1(v3: i64):
; check: $ebb1($(v3l=$V): i32, $(v3h=$V): i32):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
@@ -21,17 +21,17 @@ ebb1(v3: i64):
function %multi(i64) -> i64 {
ebb1(v1: i64):
; check: $ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
; check: ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump ebb2(v1, v1)
; check: jump $ebb2($v1l, $v1l, $v1h, $v1h)
; check: jump ebb2($v1l, $v1l, $v1h, $v1h)
ebb2(v2: i64, v3: i64):
; check: $ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
; check: ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump ebb3(v2)
; check: jump $ebb3($v2l, $v2h)
; check: jump ebb3($v2l, $v2h)
ebb3(v4: i64):
; check: $ebb3($(v4l=$V): i32, $(v4h=$V): i32):
; check: ebb3($(v4l=$V): i32, $(v4h=$V): i32):
v5 = band v4, v3
; check: $(v5l=$V) = band $v4l, $v3l
; check: $(v5h=$V) = band $v4h, $v3h
@@ -41,15 +41,15 @@ ebb3(v4: i64):
function %loop(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump $ebb1($v1l, $v1h)
; check: jump ebb1($v1l, $v1h)
ebb1(v3: i64):
; check: $ebb1($(v3l=$V): i32, $(v3h=$V): i32):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
jump ebb1(v4)
; check: jump $ebb1($v4l, $v4h)
; check: jump ebb1($v4l, $v4h)
}


@@ -26,11 +26,11 @@ ebb1(v91: i32):
jump ebb0(v91)
}
; sameln: function %onearg(i32) native {
; nextln: ebb0($v90: i32):
; nextln: jump ebb1($v90)
; nextln: ebb0(v90: i32):
; nextln: jump ebb1(v90)
; nextln:
; nextln: ebb1($v91: i32):
; nextln: jump ebb0($v91)
; nextln: ebb1(v91: i32):
; nextln: jump ebb0(v91)
; nextln: }
; Jumps with 2 args.
@@ -42,11 +42,11 @@ ebb1(v92: i32, v93: f32):
jump ebb0(v92, v93)
}
; sameln: function %twoargs(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: jump ebb1($v90, $v91)
; nextln: ebb0(v90: i32, v91: f32):
; nextln: jump ebb1(v90, v91)
; nextln:
; nextln: ebb1($v92: i32, $v93: f32):
; nextln: jump ebb0($v92, $v93)
; nextln: ebb1(v92: i32, v93: f32):
; nextln: jump ebb0(v92, v93)
; nextln: }
; Branches with no arguments. The '()' empty argument list is optional.
@@ -58,11 +58,11 @@ ebb1:
brnz v90, ebb1()
}
; sameln: function %minimal(i32) native {
; nextln: ebb0($v90: i32):
; nextln: brz $v90, ebb1
; nextln: ebb0(v90: i32):
; nextln: brz v90, ebb1
; nextln:
; nextln: ebb1:
; nextln: brnz.i32 $v90, ebb1
; nextln: brnz.i32 v90, ebb1
; nextln: }
function %twoargs(i32, f32) {
@@ -73,11 +73,11 @@ ebb1(v92: i32, v93: f32):
brnz v90, ebb0(v92, v93)
}
; sameln: function %twoargs(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: brz $v90, ebb1($v90, $v91)
; nextln: ebb0(v90: i32, v91: f32):
; nextln: brz v90, ebb1(v90, v91)
; nextln:
; nextln: ebb1($v92: i32, $v93: f32):
; nextln: brnz.i32 $v90, ebb0($v92, $v93)
; nextln: ebb1(v92: i32, v93: f32):
; nextln: brnz.i32 v90, ebb0(v92, v93)
; nextln: }
function %jumptable(i32) {
@@ -97,8 +97,8 @@ ebb40:
; sameln: function %jumptable(i32) native {
; check: jt2 = jump_table 0, 0, ebb10, ebb40, ebb20, ebb30
; check: jt200 = jump_table 0
; check: ebb10($v3: i32):
; nextln: br_table $v3, jt2
; check: ebb10(v3: i32):
; nextln: br_table v3, jt2
; nextln: trap user1
; nextln:
; nextln: ebb20:


@@ -18,9 +18,9 @@ ebb1:
}
; sameln: function %r1() -> i32, f32 spiderwasm {
; nextln: ebb1:
; nextln: $v1 = iconst.i32 3
; nextln: $v2 = f32const 0.0
; nextln: return $v1, $v2
; nextln: v1 = iconst.i32 3
; nextln: v2 = f32const 0.0
; nextln: return v1, v2
; nextln: }
function %signatures() {
@@ -30,11 +30,11 @@ function %signatures() {
fn8 = function %bar(i32) -> b1
}
; sameln: function %signatures() native {
; check: $sig10 = () native
; check: $sig11 = (i32, f64) -> i32, b1 spiderwasm
; check: sig10 = () native
; check: sig11 = (i32, f64) -> i32, b1 spiderwasm
; check: sig12 = (i32) -> b1 native
; check: $fn5 = $sig11 %foo
; check: $fn8 = sig12 %bar
; check: fn5 = sig11 %foo
; check: fn8 = sig12 %bar
; check: }
function %direct() {
@@ -48,9 +48,9 @@ ebb0:
v2, v3 = call fn2()
return
}
; check: call $fn0()
; check: $v1 = call $fn1()
; check: $v2, $v3 = call $fn2()
; check: call fn0()
; check: v1 = call fn1()
; check: v2, v3 = call fn2()
; check: return
function %indirect(i64) {
@@ -64,9 +64,9 @@ ebb0(v0: i64):
v3, v4 = call_indirect sig2, v1()
return
}
; check: $v1 = call_indirect $sig1, $v0()
; check: call_indirect $sig0, $v1($v0)
; check: $v3, $v4 = call_indirect $sig2, $v1()
; check: v1 = call_indirect sig1, v0()
; check: call_indirect sig0, v1(v0)
; check: v3, v4 = call_indirect sig2, v1()
; check: return
function %long_call() {
@@ -78,8 +78,8 @@ ebb0:
call_indirect sig0, v0()
return
}
; check: $v0 = func_addr.i32 $fn0
; check: call_indirect $sig0, $v0()
; check: v0 = func_addr.i32 fn0
; check: call_indirect sig0, v0()
; check: return
; Special purpose function arguments
@@ -88,6 +88,6 @@ ebb0(v1: i32, v2: i32, v3: i32, v4: i32):
return v4, v2, v3, v1
}
; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret native {
; check: ebb0($v1: i32, $v2: i32, $v3: i32, $v4: i32):
; check: return $v4, $v2, $v3, $v1
; check: ebb0(v1: i32, v2: i32, v3: i32, v4: i32):
; check: return v4, v2, v3, v1
; check: }


@@ -18,11 +18,11 @@ ebb201:
ebb202:
trap oob
}
; check: $v1 = ifcmp_imm $v0, 17
; check: brif eq $v1, $ebb201
; check: brif ugt $v1, $ebb202
; check: $v3 = ifcmp $v0, $v2
; check: $v4 = trueif eq $v3
; check: v1 = ifcmp_imm v0, 17
; check: brif eq v1, ebb201
; check: brif ugt v1, ebb202
; check: v3 = ifcmp v0, v2
; check: v4 = trueif eq v3
function %fflags(f32) {
ebb200(v0: f32):
@@ -40,7 +40,7 @@ ebb201:
ebb202:
trap oob
}
; check: $v2 = ffcmp $v0, $v1
; check: brff eq $v2, $ebb201
; check: brff ord $v2, $ebb202
; check: $v3 = trueff gt $v2
; check: v2 = ffcmp v0, v1
; check: brff eq v2, ebb201
; check: brff ord v2, ebb202
; check: v3 = trueff gt v2


@@ -14,11 +14,11 @@ ebb1(v0: i32 [%x8], v1: i32):
@a5 [Iret#5] return v0, v8
}
; sameln: function %foo(i32, i32) native {
; nextln: $ebb1($v0: i32 [%x8], $v1: i32):
; nextln: [-,-]$WS $v2 = iadd $v0, $v1
; nextln: ebb1(v0: i32 [%x8], v1: i32):
; nextln: [-,-]$WS v2 = iadd v0, v1
; nextln: [-]$WS trap heap_oob
; nextln: [R#1234,%x5,%x11]$WS $v6, $v7 = iadd_cout $v2, $v0
; nextln: [Rshamt#beef,%x25]$WS $v8 = ishl_imm $v6, 2
; nextln: @0055 [-,-]$WS $v9 = iadd $v8, $v7
; nextln: @00a5 [Iret#05]$WS return $v0, $v8
; nextln: [R#1234,%x5,%x11]$WS v6, v7 = iadd_cout v2, v0
; nextln: [Rshamt#beef,%x25]$WS v8 = ishl_imm v6, 2
; nextln: @0055 [-,-]$WS v9 = iadd v8, v7
; nextln: @00a5 [Iret#05]$WS return v0, v8
; nextln: }


@@ -3,34 +3,34 @@ test verifier
function %vmglobal() -> i32 {
gv3 = vmctx+16
; check: $gv3 = vmctx+16
; check: gv3 = vmctx+16
gv4 = vmctx+0
; check: $gv4 = vmctx
; check: gv4 = vmctx
; not: +0
gv5 = vmctx -256
; check: $gv5 = vmctx-256
; check: gv5 = vmctx-256
ebb0:
v1 = global_addr.i32 gv3
; check: $v1 = global_addr.i32 $gv3
; check: v1 = global_addr.i32 gv3
return v1
}
function %deref() -> i32 {
gv3 = vmctx+16
gv4 = deref(gv3)-32
; check: $gv4 = deref($gv3)-32
; check: gv4 = deref(gv3)-32
ebb0:
v1 = global_addr.i32 gv4
; check: $v1 = global_addr.i32 $gv4
; check: v1 = global_addr.i32 gv4
return v1
}
; Refer to a global variable before it's been declared.
function %backref() -> i32 {
gv1 = deref(gv2)-32
; check: $gv1 = deref($gv2)-32
; check: gv1 = deref(gv2)-32
gv2 = vmctx+16
; check: $gv2 = vmctx+16
; check: gv2 = vmctx+16
ebb0:
v1 = global_addr.i32 gv1
return v1
@@ -38,14 +38,14 @@ ebb0:
function %sym() -> i32 {
gv0 = globalsym %something
; check: $gv0 = globalsym %something
; check: gv0 = globalsym %something
gv1 = globalsym u8:9
; check: $gv1 = globalsym u8:9
; check: gv1 = globalsym u8:9
ebb0:
v0 = global_addr.i32 gv0
; check: $v0 = global_addr.i32 $gv0
; check: v0 = global_addr.i32 gv0
v1 = global_addr.i32 gv1
; check: $v1 = global_addr.i32 $gv1
; check: v1 = global_addr.i32 gv1
v2 = bxor v0, v1
return v2
}
@@ -56,11 +56,11 @@ function %sheap(i32) -> i64 {
heap2 = static gv5, guard 0x1000, bound 0x1_0000
gv5 = vmctx+64
; check: $heap1 = static reserved_reg, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
; check: $heap2 = static $gv5, min 0, bound 0x0001_0000, guard 4096
; check: heap1 = static reserved_reg, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
; check: heap2 = static gv5, min 0, bound 0x0001_0000, guard 4096
ebb0(v1: i32):
v2 = heap_addr.i64 heap1, v1, 0
; check: $v2 = heap_addr.i64 $heap1, $v1, 0
; check: v2 = heap_addr.i64 heap1, v1, 0
return v2
}
@@ -71,10 +71,10 @@ function %dheap(i32) -> i64 {
gv5 = vmctx+64
gv6 = vmctx+72
; check: $heap1 = dynamic reserved_reg, min 0x0001_0000, bound $gv6, guard 0x8000_0000
; check: $heap2 = dynamic $gv5, min 0, bound $gv6, guard 4096
; check: heap1 = dynamic reserved_reg, min 0x0001_0000, bound gv6, guard 0x8000_0000
; check: heap2 = dynamic gv5, min 0, bound gv6, guard 4096
ebb0(v1: i32):
v2 = heap_addr.i64 heap2, v1, 0
; check: $v2 = heap_addr.i64 $heap2, $v1, 0
; check: v2 = heap_addr.i64 heap2, v1, 0
return v2
}


@@ -1,14 +1,8 @@
; The .cton parser can't preserve the actual entity numbers in the input file
; since entities are numbered as they are created. For entities declared in the
; preamble, this is no problem, but for EBB and value references, mapping
; source numbers to real numbers can be a problem.
;
; It is possible to refer to instructions and EBBs that have not yet been
; defined in the lexical order, so the parser needs to rewrite these references
; after the fact.
; defined in the lexical order.
test cat
; Check that defining numbers are rewritten.
; Defining numbers.
function %defs() {
ebb100(v20: i32):
v1000 = iconst.i32x8 5
@@ -16,9 +10,9 @@ ebb100(v20: i32):
trap user4
}
; sameln: function %defs() native {
; nextln: $ebb100($v20: i32):
; nextln: $v1000 = iconst.i32x8 5
; nextln: $v9200 = f64const 0x1.0000000000000p2
; nextln: ebb100(v20: i32):
; nextln: v1000 = iconst.i32x8 5
; nextln: v9200 = f64const 0x1.0000000000000p2
; nextln: trap user4
; nextln: }
@@ -30,8 +24,8 @@ ebb100(v20: i32):
jump ebb100(v1000)
}
; sameln: function %use_value() native {
; nextln: ebb100($v20: i32):
; nextln: $v1000 = iadd_imm $v20, 5
; nextln: $v200 = iadd $v20, $v1000
; nextln: jump ebb100($v1000)
; nextln: ebb100(v20: i32):
; nextln: v1000 = iadd_imm v20, 5
; nextln: v200 = iadd v20, v1000
; nextln: jump ebb100(v1000)
; nextln: }


@@ -4,21 +4,21 @@ test verifier
function %add_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 {
ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
v10, v11 = iadd_cout v1, v4
;check: $v10, $v11 = iadd_cout $v1, $v4
;check: v10, v11 = iadd_cout v1, v4
v20, v21 = iadd_carry v2, v5, v11
; check: $v20, $v21 = iadd_carry $v2, $v5, $v11
; check: v20, v21 = iadd_carry v2, v5, v11
v30 = iadd_cin v3, v6, v21
; check: $v30 = iadd_cin $v3, $v6, $v21
; check: v30 = iadd_cin v3, v6, v21
return v10, v20, v30
}
function %sub_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 {
ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
v10, v11 = isub_bout v1, v4
;check: $v10, $v11 = isub_bout $v1, $v4
;check: v10, v11 = isub_bout v1, v4
v20, v21 = isub_borrow v2, v5, v11
; check: $v20, $v21 = isub_borrow $v2, $v5, $v11
; check: v20, v21 = isub_borrow v2, v5, v11
v30 = isub_bin v3, v6, v21
; check: $v30 = isub_bin $v3, $v6, $v21
; check: v30 = isub_bin v3, v6, v21
return v10, v20, v30
}


@@ -20,9 +20,9 @@ ebb0:
}
; sameln: function %ivalues() native {
; nextln: ebb0:
; nextln: $v0 = iconst.i32 2
; nextln: $v1 = iconst.i8 6
; nextln: $v2 = ishl $v0, $v1
; nextln: v0 = iconst.i32 2
; nextln: v1 = iconst.i8 6
; nextln: v2 = ishl v0, v1
; nextln: }
; Create and use values.
@@ -36,10 +36,10 @@ ebb0:
}
; sameln: function %bvalues() native {
; nextln: ebb0:
; nextln: $v0 = bconst.b32 true
; nextln: $v1 = bconst.b8 false
; nextln: $v2 = bextend.b32 v1
; nextln: $v3 = bxor v0, v2
; nextln: v0 = bconst.b32 true
; nextln: v1 = bconst.b8 false
; nextln: v2 = bextend.b32 v1
; nextln: v3 = bxor v0, v2
; nextln: }
; Polymorphic instruction controlled by second operand.
@@ -48,8 +48,8 @@ ebb0(v90: i32, v91: i32, v92: b1):
v0 = select v92, v90, v91
}
; sameln: function %select() native {
; nextln: ebb0($v90: i32, $v91: i32, $v92: b1):
; nextln: $v0 = select $v92, $v90, $v91
; nextln: ebb0(v90: i32, v91: i32, v92: b1):
; nextln: v0 = select v92, v90, v91
; nextln: }
; Polymorphic instruction controlled by third operand.
@@ -71,9 +71,9 @@ ebb0:
}
; sameln: function %lanes() native {
; nextln: ebb0:
; nextln: $v0 = iconst.i32x4 2
; nextln: $v1 = extractlane $v0, 3
; nextln: $v2 = insertlane $v0, 1, $v1
; nextln: v0 = iconst.i32x4 2
; nextln: v1 = extractlane v0, 3
; nextln: v2 = insertlane v0, 1, v1
; nextln: }
; Integer condition codes.
@@ -86,12 +86,12 @@ ebb0(v90: i32, v91: i32):
br_icmp eq v90, v91, ebb0(v91, v90)
}
; sameln: function %icmp(i32, i32) native {
; nextln: ebb0($v90: i32, $v91: i32):
; nextln: $v0 = icmp eq $v90, $v91
; nextln: $v1 = icmp ult $v90, $v91
; nextln: $v2 = icmp_imm sge $v90, -12
; nextln: $v3 = irsub_imm $v91, 45
; nextln: br_icmp eq $v90, $v91, ebb0($v91, $v90)
; nextln: ebb0(v90: i32, v91: i32):
; nextln: v0 = icmp eq v90, v91
; nextln: v1 = icmp ult v90, v91
; nextln: v2 = icmp_imm sge v90, -12
; nextln: v3 = irsub_imm v91, 45
; nextln: br_icmp eq v90, v91, ebb0(v91, v90)
; nextln: }
; Floating condition codes.
@@ -102,10 +102,10 @@ ebb0(v90: f32, v91: f32):
v2 = fcmp lt v90, v91
}
; sameln: function %fcmp(f32, f32) native {
; nextln: ebb0($v90: f32, $v91: f32):
; nextln: $v0 = fcmp eq $v90, $v91
; nextln: $v1 = fcmp uno $v90, $v91
; nextln: $v2 = fcmp lt $v90, $v91
; nextln: ebb0(v90: f32, v91: f32):
; nextln: v0 = fcmp eq v90, v91
; nextln: v1 = fcmp uno v90, v91
; nextln: v2 = fcmp lt v90, v91
; nextln: }
; The bitcast instruction has two type variables: The controlling type variable
@@ -116,9 +116,9 @@ ebb0(v90: i32, v91: f32):
v1 = bitcast.i32 v91
}
; sameln: function %bitcast(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: $v0 = bitcast.i8x4 $v90
; nextln: $v1 = bitcast.i32 $v91
; nextln: ebb0(v90: i32, v91: f32):
; nextln: v0 = bitcast.i8x4 v90
; nextln: v1 = bitcast.i32 v91
; nextln: }
; Stack slot references
@@ -136,17 +136,17 @@ ebb0:
stack_store v2, ss2
}
; sameln: function %stack() native {
; check: $ss2 = local 4
; check: $ss3 = incoming_arg 4, offset 8
; check: $ss4 = outgoing_arg 4
; check: $ss5 = emergency_slot 4
; check: $ss10 = spill_slot 8
; check: ss2 = local 4
; check: ss3 = incoming_arg 4, offset 8
; check: ss4 = outgoing_arg 4
; check: ss5 = emergency_slot 4
; check: ss10 = spill_slot 8
; check: ebb0:
; nextln: $v1 = stack_load.i32 $ss10
; nextln: $v2 = stack_load.i32 $ss10+4
; nextln: stack_store $v1, $ss10+2
; nextln: stack_store $v2, $ss2
; nextln: v1 = stack_load.i32 ss10
; nextln: v2 = stack_load.i32 ss10+4
; nextln: stack_store v1, ss10+2
; nextln: stack_store v2, ss2
; Memory access instructions.
function %memory(i32) {
@@ -163,17 +163,17 @@ ebb0(v1: i32):
store notrap aligned v3, v1-12
}
; sameln: function %memory(i32) native {
; nextln: ebb0($v1: i32):
; nextln: $v2 = load.i64 $v1
; nextln: $v3 = load.i64 aligned $v1
; nextln: $v4 = load.i64 notrap $v1
; nextln: $v5 = load.i64 notrap aligned $v1
; nextln: $v6 = load.i64 notrap aligned $v1
; nextln: $v7 = load.i64 $v1-12
; nextln: $v8 = load.i64 notrap $v1+0x0001_0000
; nextln: store $v2, $v1
; nextln: store aligned $v3, $v1+12
; nextln: store notrap aligned $v3, $v1-12
; nextln: ebb0(v1: i32):
; nextln: v2 = load.i64 v1
; nextln: v3 = load.i64 aligned v1
; nextln: v4 = load.i64 notrap v1
; nextln: v5 = load.i64 notrap aligned v1
; nextln: v6 = load.i64 notrap aligned v1
; nextln: v7 = load.i64 v1-12
; nextln: v8 = load.i64 notrap v1+0x0001_0000
; nextln: store v2, v1
; nextln: store aligned v3, v1+12
; nextln: store notrap aligned v3, v1-12
; Register diversions.
; This test file has no ISA, so we can only use register unit numbers.
@@ -188,12 +188,12 @@ ebb0(v1: i32):
return
}
; sameln: function %diversion(i32) native {
; nextln: $ss0 = spill_slot 4
; check: ebb0($v1: i32):
; nextln: regmove $v1, %10 -> %20
; nextln: regmove $v1, %20 -> %10
; nextln: regspill $v1, %10 -> $ss0
; nextln: regfill $v1, $ss0 -> %10
; nextln: ss0 = spill_slot 4
; check: ebb0(v1: i32):
; nextln: regmove v1, %10 -> %20
; nextln: regmove v1, %20 -> %10
; nextln: regspill v1, %10 -> ss0
; nextln: regfill v1, ss0 -> %10
; nextln: return
; nextln: }
@@ -222,12 +222,12 @@ ebb0(v0: i32):
return
}
; sameln: function %cond_traps(i32)
; nextln: ebb0($v0: i32):
; nextln: trapz $v0, stk_ovf
; nextln: $v1 = ifcmp_imm v0, 5
; nextln: trapif ugt $v1, oob
; nextln: $v2 = bitcast.f32 $v1
; nextln: $v3 = ffcmp $v2, $v2
; nextln: trapff uno $v3, int_ovf
; nextln: ebb0(v0: i32):
; nextln: trapz v0, stk_ovf
; nextln: v1 = ifcmp_imm v0, 5
; nextln: trapif ugt v1, oob
; nextln: v2 = bitcast.f32 v1
; nextln: v3 = ffcmp v2, v2
; nextln: trapff uno v3, int_ovf
; nextln: return
; nextln: }


@@ -17,7 +17,7 @@ ebb0(v1: i32, v2: i32):
function %dead_arg(i32, i32) -> i32{
ebb0(v1: i32, v2: i32):
; not: regmove
; check: return $v1
; check: return v1
return v1
}
@@ -25,8 +25,8 @@ ebb0(v1: i32, v2: i32):
function %move1(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
; check: regmove $v2, %x11 -> %x10
; nextln: return $v2
; check: regmove v2, %x11 -> %x10
; nextln: return v2
return v2
}
@@ -34,10 +34,10 @@ ebb0(v1: i32, v2: i32):
function %swap(i32, i32) -> i32, i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
; check: regmove $v2, %x11 -> $(tmp=$RX)
; nextln: regmove $v1, %x10 -> %x11
; nextln: regmove $v2, $tmp -> %x10
; nextln: return $v2, $v1
; check: regmove v2, %x11 -> $(tmp=$RX)
; nextln: regmove v1, %x10 -> %x11
; nextln: regmove v2, $tmp -> %x10
; nextln: return v2, v1
return v2, v1
}


@@ -23,8 +23,8 @@ ebb1(v10: i32):
function %trivial(i32) -> i32 {
ebb0(v0: i32):
; check: $(cp1=$V) = copy $v0
; nextln: brnz $v0, $ebb1($cp1)
; check: $(cp1=$V) = copy v0
; nextln: brnz v0, ebb1($cp1)
brnz v0, ebb1(v0)
; not: copy
v1 = iadd_imm v0, 7
@@ -39,8 +39,8 @@ ebb1(v10: i32):
; A value is used as an SSA argument twice in the same branch.
function %dualuse(i32) -> i32 {
ebb0(v0: i32):
; check: $(cp1=$V) = copy $v0
; nextln: brnz $v0, $ebb1($cp1, $v0)
; check: $(cp1=$V) = copy v0
; nextln: brnz v0, ebb1($cp1, v0)
brnz v0, ebb1(v0, v0)
v1 = iadd_imm v0, 7
v2 = iadd_imm v1, 56
@@ -55,15 +55,15 @@ ebb1(v10: i32, v11: i32):
; The interference can be broken with a copy at either branch.
function %interference(i32) -> i32 {
ebb0(v0: i32):
; check: $(cp0=$V) = copy $v0
; check: $(cp0=$V) = copy v0
; not: copy
; check: brnz $v0, ebb1($cp0)
; check: brnz v0, ebb1($cp0)
brnz v0, ebb1(v0)
v1 = iadd_imm v0, 7
; v1 and v0 interfere here:
v2 = iadd_imm v0, 8
; not: copy
; check: jump $ebb1($v1)
; check: jump ebb1(v1)
jump ebb1(v1)
ebb1(v10: i32):
@@ -81,13 +81,13 @@ ebb0(v0: i32):
ebb1(v10: i32, v11: i32):
; v11 needs to be isolated because it interferes with v10.
; check: $ebb1($v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
; check: $v11 = copy $nv11a
; check: ebb1(v10: i32 [$LOC], $(nv11a=$V): i32 [$LOC])
; check: v11 = copy $nv11a
v12 = iadd v10, v11
v13 = icmp ult v12, v0
; check: $(nv11b=$V) = copy $v11
; check: $(nv11b=$V) = copy v11
; not: copy
; check: brnz $v13, $ebb1($nv11b, $v12)
; check: brnz v13, ebb1($nv11b, v12)
brnz v13, ebb1(v11, v12)
return v12
}


@@ -20,10 +20,10 @@ function %tied_alive() -> i32 {
ebb0:
v0 = iconst.i32 12
v1 = iconst.i32 13
; check: $(v0c=$V) = copy $v0
; check: $v2 = isub $v0c, $v1
; check: $(v0c=$V) = copy v0
; check: v2 = isub $v0c, v1
v2 = isub v0, v1
; check: $v3 = iadd $v2, $v0
; check: v3 = iadd v2, v0
v3 = iadd v2, v0
return v3
}
@@ -32,11 +32,11 @@ ebb0:
function %fixed_op() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
; check: regmove v0, %rax -> %rcx
v2 = ishl v1, v0
return v2
}
@@ -45,14 +45,14 @@ ebb0:
function %fixed_op_twice() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
; check: regmove v0, %rax -> %rcx
v2 = ishl v1, v0
; check: regmove $v0, %rcx -> $REG
; check: regmove $v2, $REG -> %rcx
; check: regmove v0, %rcx -> $REG
; check: regmove v2, $REG -> %rcx
v3 = ishl v0, v2
return v3
@@ -62,12 +62,12 @@ ebb0:
function %fixed_op_twice() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
; sameln: v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
; check: $v2 = ishl $v1, $v0
; check: regmove v0, %rax -> %rcx
; check: v2 = ishl v1, v0
v2 = ishl v1, v0
; Now v0 is globally allocated to %rax, but diverted to %rcx.
@@ -77,6 +77,6 @@ ebb0:
; check: ,%rcx]
; sameln: isub
; Move it into place for the return value.
; check: regmove $v3, %rcx -> %rax
; check: regmove v3, %rcx -> %rax
return v3
}


@@ -19,12 +19,12 @@ ebb0(v0: i32):
jump ebb2(v3, v2, v0)
ebb2(v4: i32, v5: i32, v7: i32):
; check: $ebb2
; check: ebb2
v6 = iadd v4, v5
v8 = iconst.i32 -1
; v7 is killed here and v9 gets the same register.
v9 = iadd v7, v8
; check: $v9 = iadd $v7, $v8
; check: v9 = iadd v7, v8
; Here v9, the brnz control, appears to interfere with v9, the EBB argument,
; so divert_fixed_input_conflicts() calls add_var(v9), which is ok. The
; add_var sanity checks got confused when no fixed assignment could be
@@ -32,7 +32,7 @@ ebb2(v4: i32, v5: i32, v7: i32):
;
; We should be able to handle this situation without making copies of v9.
brnz v9, ebb2(v5, v6, v9)
; check: brnz $v9, $ebb2($V, $V, $v9)
; check: brnz v9, ebb2($V, $V, v9)
jump ebb3
ebb3:


@@ -39,9 +39,9 @@ ebb1:
; flag so it can be reassigned to a different global register.
function %pr218(i64 [%rdi], i64 [%rsi], i64 [%rdx], i64 [%rcx]) -> i64 [%rax] {
ebb0(v0: i64, v1: i64, v2: i64, v3: i64):
; check: regmove $v3, %rcx ->
; check: regmove v3, %rcx ->
v4 = ushr v0, v0
; check: $v4 = copy
; check: v4 = copy
jump ebb1
ebb1:


@@ -52,8 +52,8 @@ ebb6:
v25 = load.i64 v24
v8 = iadd v25, v23
v9 = load.i32 v8+56
; check: $v9 = spill
; check: brnz $V, $ebb3($v9)
; check: v9 = spill
; check: brnz $V, ebb3(v9)
brnz v9, ebb3(v9)
jump ebb4


@@ -9,11 +9,11 @@ function %spill_return() -> i32 {
ebb0:
v0 = call fn0()
; check: $(reg=$V) = call $fn0
; check: $v0 = spill $reg
; check: $(reg=$V) = call fn0
; check: v0 = spill $reg
v2 = call fn0()
; check: $v2 = call $fn0
; check: v2 = call fn0
return v0
; check: $(reload=$V) = fill $v0
; check: $(reload=$V) = fill v0
; check: return $reload
}


@@ -25,13 +25,13 @@ function %pyramid(i32) -> i32 {
; check: ss2 = spill_slot 4
; not: spill_slot
ebb0(v1: i32):
; check: $ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS $v1 = spill $rv1
; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
; nextln: ,ss2]$WS $v2 = spill $r1v2
; nextln: ,ss2]$WS v2 = spill $r1v2
; not: spill
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
@@ -46,7 +46,7 @@ ebb0(v1: i32):
v13 = iadd_imm v12, 12
v14 = iadd_imm v13, 12
v33 = iadd v13, v14
; check: iadd $v13
; check: iadd v13
v32 = iadd v33, v12
v31 = iadd v32, v11
v30 = iadd v31, v10
@@ -58,26 +58,26 @@ ebb0(v1: i32):
v24 = iadd v25, v4
v23 = iadd v24, v3
v22 = iadd v23, v2
; check: $(r2v2=$V) = fill $v2
; check: $v22 = iadd $v23, $r2v2
; check: $(r2v2=$V) = fill v2
; check: v22 = iadd v23, $r2v2
v21 = iadd v22, v1
; check: $(r2v1=$V) = fill $v1
; check: $v21 = iadd $v22, $r2v1
; check: $(r2v1=$V) = fill v1
; check: v21 = iadd v22, $r2v1
; check: $(rlink2=$V) = fill $link
return v21
; check: return $v21, $rlink2
; check: return v21, $rlink2
}
; All values live across a call must be spilled
function %across_call(i32) {
fn0 = function %foo(i32)
ebb0(v1: i32):
; check: $v1 = spill
; check: v1 = spill
call fn0(v1)
; check: call $fn0
; check: call fn0
call fn0(v1)
; check: fill $v1
; check: call $fn0
; check: fill v1
; check: call fn0
return
}
@@ -85,9 +85,9 @@ ebb0(v1: i32):
function %doubleuse(i32) {
fn0 = function %xx(i32, i32)
ebb0(v0: i32):
; check: $(c=$V) = copy $v0
; check: $(c=$V) = copy v0
call fn0(v0, v0)
; check: call $fn0($v0, $c)
; check: call fn0(v0, $c)
return
}
@@ -104,9 +104,9 @@ ebb0(v0: i32):
function %doubleuse_icall2(i32) {
sig0 = (i32, i32) native
ebb0(v0: i32):
; check: $(c=$V) = copy $v0
; check: $(c=$V) = copy v0
call_indirect sig0, v0(v0, v0)
; check: call_indirect $sig0, $v0($v0, $c)
; check: call_indirect sig0, v0(v0, $c)
return
}
@@ -116,8 +116,8 @@ function %stackargs(i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss1 = incoming_arg 4, offset 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
; unordered: fill $v6
; unordered: fill $v7
; unordered: fill v6
; unordered: fill v7
v10 = iadd v6, v7
return v10
}
@@ -125,7 +125,7 @@ ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
; More EBB arguments than registers.
function %ebbargs(i32) -> i32 {
ebb0(v1: i32):
; check: $v1 = spill
; check: v1 = spill
v2 = iconst.i32 1
jump ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
@@ -148,7 +148,7 @@ ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17:
; Spilling an EBB argument to make room for a branch operand.
function %brargs(i32) -> i32 {
ebb0(v1: i32):
; check: $v1 = spill
; check: v1 = spill
v2 = iconst.i32 1
brnz v1, ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
return v1
@@ -179,13 +179,13 @@ function %use_spilled_value(i32) -> i32 {
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
ebb0(v1: i32):
; check: $ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS $v1 = spill $rv1
; check: ebb0($(rv1=$V): i32 [%x10], $(rlink=$V): i32 [%x1])
; check: ,ss0]$WS v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
; nextln: ,ss2]$WS $v2 = spill $r1v2
; nextln: ,ss2]$WS v2 = spill $r1v2
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
v5 = iadd_imm v4, 12


@@ -5,7 +5,7 @@ ebb0(v0: i32, v1: i32):
v2 = iadd v0, v1
v3 = iadd v0, v1
v4 = imul v2, v3
; check: v4 = imul $v2, $v2
; check: v4 = imul v2, v2
return v4
}
@@ -16,7 +16,7 @@ ebb0(v0: i32, v1: i32):
v4 = imul v2, v3
v5 = imul v2, v2
v6 = iadd v4, v5
; check: v6 = iadd $v4, $v4
; check: v6 = iadd v4, v4
return v6
}


@@ -6,7 +6,7 @@ use cretonne::ir::Function;
use cretonne::isa::TargetIsa;
use cretonne::settings::{Flags, FlagsOrIsa};
use cton_reader::{Details, Comment};
use filecheck::{self, CheckerBuilder, Checker, Value as FCValue};
use filecheck::{CheckerBuilder, Checker, NO_VARIABLES};
pub type Result<T> = result::Result<T, String>;
@@ -67,34 +67,19 @@ pub trait SubTest {
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()>;
}
/// Make the parser's source map available as filecheck variables.
///
/// This means that the filecheck directives can refer to entities like `jump $ebb3`, where `$ebb3`
/// will expand to the EBB number that was assigned to `ebb3` in the input source.
///
/// The expanded entity names are wrapped in word boundary regex guards so that 'inst1' doesn't
/// match 'inst10'.
impl<'a> filecheck::VariableMap for Context<'a> {
fn lookup(&self, varname: &str) -> Option<FCValue> {
self.details.map.lookup_str(varname).map(|e| {
FCValue::Regex(format!(r"\b{}\b", e).into())
})
}
}
/// Run filecheck on `text`, using directives extracted from `context`.
pub fn run_filecheck(text: &str, context: &Context) -> Result<()> {
let checker = build_filechecker(context)?;
if checker.check(text, context).map_err(
|e| format!("filecheck: {}", e),
)?
if checker.check(text, NO_VARIABLES).map_err(|e| {
format!("filecheck: {}", e)
})?
{
Ok(())
} else {
// Filecheck mismatch. Emit an explanation as output.
let (_, explain) = checker.explain(text, context).map_err(
|e| format!("explain: {}", e),
)?;
let (_, explain) = checker.explain(text, NO_VARIABLES).map_err(|e| {
format!("explain: {}", e)
})?;
Err(format!("filecheck failed:\n{}{}", checker, explain))
}
}
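
For reference, here is a minimal sketch of how a subtest can drive the
filecheck crate after this change. The CheckerBuilder, Checker, and
NO_VARIABLES names come from the import above; the helper name, its
arguments, and the builder.text/finish calls are assumptions about that
crate's builder interface, not part of this commit:

    use filecheck::{CheckerBuilder, NO_VARIABLES};

    /// Sketch: run the filecheck directives in `directives` against `text`.
    fn check_output(directives: &str, text: &str) -> Result<(), String> {
        let mut builder = CheckerBuilder::new();
        // Collect check:/sameln:/nextln: directives from the test's comments.
        builder.text(directives).map_err(|e| format!("filecheck: {}", e))?;
        let checker = builder.finish();

        // No VariableMap is needed any more: directives refer to the printed
        // entity names (v1, ebb0, ss0, ...) directly.
        if checker.check(text, NO_VARIABLES).map_err(|e| format!("filecheck: {}", e))? {
            Ok(())
        } else {
            // On a mismatch, emit filecheck's explanation, as run_filecheck does.
            let (_, explain) = checker
                .explain(text, NO_VARIABLES)
                .map_err(|e| format!("explain: {}", e))?;
            Err(format!("filecheck failed:\n{}{}", checker, explain))
        }
    }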