Add a calling convention to all function signatures.
A CallConv enum on every function signature makes it possible to
generate calls to functions with different calling conventions within
the same ISA, and even within a single function.
The calling conventions also serve as a way of customizing Cretonne's
behavior when embedded inside a VM. As an example, the SpiderWASM
calling convention is used to compile WebAssembly functions that run
inside the SpiderMonkey virtual machine.
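For illustration only, the enum could look roughly like this; the
exact names and derives are assumptions for this sketch, not
necessarily what the Cretonne sources use:

    /// Calling convention identifier attached to every function signature.
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    pub enum CallConv {
        /// Default calling convention of the embedding platform.
        Native,
        /// Calling convention for WebAssembly functions running inside
        /// the SpiderMonkey virtual machine.
        SpiderWASM,
    }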
All function signatures must have a calling convention at the end, so
this changes the textual IL syntax.
Before:
sig1 = signature(i32, f64) -> f64
After:
sig1 = (i32, f64) -> f64 native
sig2 = (i32) spiderwasm
When printing functions, the calling convention goes after the return types:
function %r1() -> i32, f32 spiderwasm {
ebb1:
...
}
In the parser, the calling convention is optional and defaults to
"native". This is mostly to avoid having to update all the existing
test cases under filetests/. When printing a function, the calling
convention is always written out, even for "native" functions.
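As a rough sketch of how the textual form could be printed and parsed
with that default (assumed helper names, not the actual Cretonne
printer/parser code), building on the CallConv sketch above:

    use std::fmt;

    impl fmt::Display for CallConv {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.write_str(match *self {
                CallConv::Native => "native",
                CallConv::SpiderWASM => "spiderwasm",
            })
        }
    }

    /// Parse an optional trailing calling convention token; a missing
    /// token means "native".
    fn parse_call_conv(token: Option<&str>) -> Result<CallConv, String> {
        match token {
            None | Some("native") => Ok(CallConv::Native),
            Some("spiderwasm") => Ok(CallConv::SpiderWASM),
            Some(other) => Err(format!("unknown calling convention: {}", other)),
        }
    }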
committed by Jakob Stoklund Olesen
parent 5fa991e325
commit c96d4daa20
@@ -1,6 +1,6 @@
 test verifier

-function %average(i32, i32) -> f32 {
+function %average(i32, i32) -> f32 native {
 ss1 = local 8 ; Stack slot for ``sum``.

 ebb1(v1: i32, v2: i32):
@@ -410,13 +410,6 @@ This simple example illustrates direct function calls and signatures::

 Indirect function calls use a signature declared in the preamble.

-.. inst:: SIG = signature signature
-
-    Declare a function signature for use with indirect calls.
-
-    :arg signature: Function signature. See :token:`signature`.
-    :result SIG: A signature identifier.
-
 .. autoinst:: call_indirect

 .. todo:: Define safe indirect function calls.
@@ -6,14 +6,14 @@ isa intel
 ; regex: V=v\d+

 function %f() {
-sig0 = signature(i32) -> i32
-; check: sig0 = signature(i32 [%rdi]) -> i32 [%rax]
+sig0 = (i32) -> i32 native
+; check: sig0 = (i32 [%rdi]) -> i32 [%rax] native

-sig1 = signature(i64) -> b1
-; check: sig1 = signature(i64 [%rdi]) -> b1 [%rax]
+sig1 = (i64) -> b1 native
+; check: sig1 = (i64 [%rdi]) -> b1 [%rax] native

-sig2 = signature(f32, i64) -> f64
-; check: sig2 = signature(f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0]
+sig2 = (f32, i64) -> f64 native
+; check: sig2 = (f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0] native

 ebb0:
 return
@@ -9,7 +9,7 @@ isa intel haswell

 function %I32() {
 fn0 = function %foo()
-sig0 = signature()
+sig0 = ()

 ebb0:
 ; asm: movl $1, %ecx
@@ -11,7 +11,7 @@ isa intel haswell
 ; Tests for i64 instructions.
 function %I64() {
 fn0 = function %foo()
-sig0 = signature()
+sig0 = ()

 ebb0:
@@ -457,7 +457,7 @@ ebb2:
 ; be done by an instruction shrinking pass.
 function %I32() {
 fn0 = function %foo()
-sig0 = signature()
+sig0 = ()

 ebb0:
@@ -7,8 +7,8 @@ isa riscv enable_e
 function %f() {
 ; Spilling into the stack args after %x15 since %16 and up are not
 ; available in RV32E.
-sig0 = signature(i64, i64, i64, i64) -> i64
-; check: sig0 = signature(i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11]
+sig0 = (i64, i64, i64, i64) -> i64 native
+; check: sig0 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11] native
 ebb0:
 return
 }
@@ -5,27 +5,27 @@ isa riscv
 ; regex: V=v\d+

 function %f() {
-sig0 = signature(i32) -> i32
-; check: sig0 = signature(i32 [%x10]) -> i32 [%x10]
+sig0 = (i32) -> i32 native
+; check: sig0 = (i32 [%x10]) -> i32 [%x10] native

-sig1 = signature(i64) -> b1
-; check: sig1 = signature(i32 [%x10], i32 [%x11]) -> b1 [%x10]
+sig1 = (i64) -> b1 native
+; check: sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native

 ; The i64 argument must go in an even-odd register pair.
-sig2 = signature(f32, i64) -> f64
-; check: sig2 = signature(f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10]
+sig2 = (f32, i64) -> f64 native
+; check: sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native

 ; Spilling into the stack args.
-sig3 = signature(f64, f64, f64, f64, f64, f64, f64, i64) -> f64
-; check: sig3 = signature(f64 [%f10], f64 [%f11], f64 [%f12], f64 [%f13], f64 [%f14], f64 [%f15], f64 [%f16], i32 [0], i32 [4]) -> f64 [%f10]
+sig3 = (f64, f64, f64, f64, f64, f64, f64, i64) -> f64 native
+; check: sig3 = (f64 [%f10], f64 [%f11], f64 [%f12], f64 [%f13], f64 [%f14], f64 [%f15], f64 [%f16], i32 [0], i32 [4]) -> f64 [%f10] native

 ; Splitting vectors.
-sig4 = signature(i32x4)
-; check: sig4 = signature(i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13])
+sig4 = (i32x4) native
+; check: sig4 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13]) native

 ; Splitting vectors, then splitting ints.
-sig5 = signature(i64x4)
-; check: sig5 = signature(i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17])
+sig5 = (i64x4) native
+; check: sig5 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17]) native

 ebb0:
 return
@@ -4,7 +4,7 @@ isa riscv

 function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
 fn0 = function %foo()
-sig0 = signature()
+sig0 = ()

 ebb0(v9999: i32):
 [-,%x10] v1 = iconst.i32 1
@@ -106,7 +106,7 @@ ebb0(v0: i64x4):
 }

 function %indirect(i32) {
-sig1 = signature()
+sig1 = () native
 ebb0(v0: i32):
 call_indirect sig1, v0()
 return
@@ -114,7 +114,7 @@ ebb0(v0: i32):

 ; The first argument to call_indirect doesn't get altered.
 function %indirect_arg(i32, f32x2) {
-sig1 = signature(f32x2)
+sig1 = (f32x2) native
 ebb0(v0: i32, v1: f32x2):
 call_indirect sig1, v0(v1)
 ; check: call_indirect $sig1, $v0($V, $V)
@@ -3,32 +3,32 @@ test legalizer
 isa riscv

 function %parse_encoding(i32 [%x5]) -> i32 [%x10] {
-; check: function %parse_encoding(i32 [%x5], i32 link [%x1]) -> i32 [%x10], i32 link [%x1] {
+; check: function %parse_encoding(i32 [%x5], i32 link [%x1]) -> i32 [%x10], i32 link [%x1] native {

-sig0 = signature(i32 [%x10]) -> i32 [%x10]
-; check: sig0 = signature(i32 [%x10]) -> i32 [%x10]
+sig0 = (i32 [%x10]) -> i32 [%x10] native
+; check: sig0 = (i32 [%x10]) -> i32 [%x10] native

-sig1 = signature(i32 [%x10], i32 [%x11]) -> b1 [%x10]
-; check: sig1 = signature(i32 [%x10], i32 [%x11]) -> b1 [%x10]
+sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native
+; check: sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native

-sig2 = signature(f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10]
-; check: sig2 = signature(f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10]
+sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native
+; check: sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native

 ; Arguments on stack where not necessary
-sig3 = signature(f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10]
-; check: sig3 = signature(f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10]
+sig3 = (f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10] native
+; check: sig3 = (f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10] native

 ; Stack argument before register argument
-sig4 = signature(f32 [72], i32 [%x10])
-; check: sig4 = signature(f32 [72], i32 [%x10])
+sig4 = (f32 [72], i32 [%x10]) native
+; check: sig4 = (f32 [72], i32 [%x10]) native

 ; Return value on stack
-sig5 = signature() -> f32 [0]
-; check: sig5 = signature() -> f32 [0]
+sig5 = () -> f32 [0] native
+; check: sig5 = () -> f32 [0] native

 ; function + signature
-fn15 = function %bar(i32 [%x10]) -> b1 [%x10]
-; check: sig6 = signature(i32 [%x10]) -> b1 [%x10]
+fn15 = function %bar(i32 [%x10]) -> b1 [%x10] native
+; check: sig6 = (i32 [%x10]) -> b1 [%x10] native
 ; nextln: fn0 = sig6 %bar

 ebb0(v0: i32):
@@ -14,7 +14,7 @@ ebb2(v5: i32):
 return v5

 }
-; sameln: function %simple_loop(i32) -> i32 {
+; sameln: function %simple_loop
 ; nextln: ebb2(v6: i32):
 ; nextln: v1 = iconst.i32 1
 ; nextln: v2 = iconst.i32 2
@@ -39,7 +39,7 @@ ebb5(v16: i32):
 return v17
 }

-; sameln: function %complex(i32) -> i32 {
+; sameln: function %complex
 ; nextln: ebb6(v20: i32):
 ; nextln: v1 = iconst.i32 1
 ; nextln: v2 = iconst.i32 4
@@ -9,7 +9,7 @@ ebb0:
 ebb1:
 jump ebb0()
 }
-; sameln: function %minimal() {
+; sameln: function %minimal() native {
 ; nextln: ebb0:
 ; nextln: jump ebb1
 ; nextln:
@@ -25,7 +25,7 @@ ebb0(v90: i32):
 ebb1(v91: i32):
 jump ebb0(v91)
 }
-; sameln: function %onearg(i32) {
+; sameln: function %onearg(i32) native {
 ; nextln: ebb0($v90: i32):
 ; nextln: jump ebb1($v90)
 ; nextln:
@@ -41,7 +41,7 @@ ebb0(v90: i32, v91: f32):
 ebb1(v92: i32, v93: f32):
 jump ebb0(v92, v93)
 }
-; sameln: function %twoargs(i32, f32) {
+; sameln: function %twoargs(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: jump ebb1($v90, $v91)
 ; nextln:
@@ -57,7 +57,7 @@ ebb0(v90: i32):
 ebb1:
 brnz v90, ebb1()
 }
-; sameln: function %minimal(i32) {
+; sameln: function %minimal(i32) native {
 ; nextln: ebb0($v90: i32):
 ; nextln: brz $v90, ebb1
 ; nextln:
@@ -72,7 +72,7 @@ ebb0(v90: i32, v91: f32):
 ebb1(v92: i32, v93: f32):
 brnz v90, ebb0(v92, v93)
 }
-; sameln: function %twoargs(i32, f32) {
+; sameln: function %twoargs(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: brz $v90, ebb1($v90, $v91)
 ; nextln:
@@ -94,7 +94,7 @@ ebb30:
 ebb40:
 trap
 }
-; sameln: function %jumptable(i32) {
+; sameln: function %jumptable(i32) native {
 ; nextln: jt0 = jump_table 0
 ; nextln: jt1 = jump_table 0, 0, ebb0, ebb3, ebb1, ebb2
 ; nextln:
@@ -5,18 +5,18 @@ function %mini() {
 ebb1:
 return
 }
-; sameln: function %mini() {
+; sameln: function %mini() native {
 ; nextln: ebb0:
 ; nextln: return
 ; nextln: }

-function %r1() -> i32, f32 {
+function %r1() -> i32, f32 spiderwasm {
 ebb1:
 v1 = iconst.i32 3
 v2 = f32const 0.0
 return v1, v2
 }
-; sameln: function %r1() -> i32, f32 {
+; sameln: function %r1() -> i32, f32 spiderwasm {
 ; nextln: ebb0:
 ; nextln: $v1 = iconst.i32 3
 ; nextln: $v2 = f32const 0.0
@@ -24,15 +24,15 @@ ebb1:
 ; nextln: }

 function %signatures() {
-sig10 = signature()
-sig11 = signature(i32, f64) -> i32, b1
+sig10 = ()
+sig11 = (i32, f64) -> i32, b1 spiderwasm
 fn5 = sig11 %foo
 fn8 = function %bar(i32) -> b1
 }
-; sameln: function %signatures() {
-; nextln: $sig10 = signature()
-; nextln: $sig11 = signature(i32, f64) -> i32, b1
-; nextln: sig2 = signature(i32) -> b1
+; sameln: function %signatures() native {
+; nextln: $sig10 = () native
+; nextln: $sig11 = (i32, f64) -> i32, b1 spiderwasm
+; nextln: sig2 = (i32) -> b1 native
 ; nextln: $fn5 = $sig11 %foo
 ; nextln: $fn8 = sig2 %bar
 ; nextln: }
@@ -54,9 +54,9 @@ ebb0:
 ; check: return

 function %indirect(i64) {
-sig0 = signature(i64)
-sig1 = signature() -> i32
-sig2 = signature() -> i32, f32
+sig0 = (i64)
+sig1 = () -> i32
+sig2 = () -> i32, f32

 ebb0(v0: i64):
 v1 = call_indirect sig1, v0()
@@ -74,7 +74,7 @@ function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32
 ebb0(v1: i32, v2: i32, v3: i32, v4: i32):
 return v4, v2, v3, v1
 }
-; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret {
+; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret native {
 ; check: ebb0($v1: i32, $v2: i32, $v3: i32, $v4: i32):
 ; check: return $v4, $v2, $v3, $v1
 ; check: }
@@ -13,7 +13,7 @@ ebb1(v0: i32, v1: i32):
 v9 = iadd v8, v7
 [Iret#5] return v0, v8
 }
-; sameln: function %foo(i32, i32) {
+; sameln: function %foo(i32, i32) native {
 ; nextln: $ebb1($v0: i32, $v1: i32):
 ; nextln: [-,-]$WS $v2 = iadd $v0, $v1
 ; nextln: [-]$WS trap
@@ -2,4 +2,4 @@ test cat

 ; 'function' is not a keyword, and can be used as the name of a function too.
 function %function() {}
-; check: function %function()
+; check: function %function() native
@@ -15,7 +15,7 @@ ebb100(v20: i32):
 v9200 = f64const 0x4.0p0
 trap
 }
-; sameln: function %defs() {
+; sameln: function %defs() native {
 ; nextln: $ebb100($v20: i32):
 ; nextln: $v1000 = iconst.i32x8 5
 ; nextln: $v9200 = f64const 0x1.0000000000000p2
@@ -29,7 +29,7 @@ ebb100(v20: i32):
 v200 = iadd v20, v1000
 jump ebb100(v1000)
 }
-; sameln: function %use_value() {
+; sameln: function %use_value() native {
 ; nextln: ebb0($v20: i32):
 ; nextln: $v1000 = iadd_imm $v20, 5
 ; nextln: $v200 = iadd $v20, $v1000
@@ -5,7 +5,7 @@ function %minimal() {
 ebb0:
 trap
 }
-; sameln: function %minimal() {
+; sameln: function %minimal() native {
 ; nextln: ebb0:
 ; nextln: trap
 ; nextln: }
@@ -18,7 +18,7 @@ ebb0:
 v1 = iconst.i8 6
 v2 = ishl v0, v1
 }
-; sameln: function %ivalues() {
+; sameln: function %ivalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32 2
 ; nextln: $v1 = iconst.i8 6
@@ -34,7 +34,7 @@ ebb0:
 v2 = bextend.b32 v1
 v3 = bxor v0, v2
 }
-; sameln: function %bvalues() {
+; sameln: function %bvalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = bconst.b32 true
 ; nextln: $v1 = bconst.b8 false
@@ -47,7 +47,7 @@ function %select() {
 ebb0(v90: i32, v91: i32, v92: b1):
 v0 = select v92, v90, v91
 }
-; sameln: function %select() {
+; sameln: function %select() native {
 ; nextln: ebb0($v90: i32, $v91: i32, $v92: b1):
 ; nextln: $v0 = select $v92, $v90, $v91
 ; nextln: }
@@ -59,7 +59,7 @@ ebb0:
 v1 = extractlane v0, 3
 v2 = insertlane v0, 1, v1
 }
-; sameln: function %lanes() {
+; sameln: function %lanes() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32x4 2
 ; nextln: $v1 = extractlane $v0, 3
@@ -75,7 +75,7 @@ ebb0(v90: i32, v91: i32):
 v3 = irsub_imm v91, 45
 br_icmp eq v90, v91, ebb0(v91, v90)
 }
-; sameln: function %icmp(i32, i32) {
+; sameln: function %icmp(i32, i32) native {
 ; nextln: ebb0($v90: i32, $v91: i32):
 ; nextln: $v0 = icmp eq $v90, $v91
 ; nextln: $v1 = icmp ult $v90, $v91
@@ -91,7 +91,7 @@ ebb0(v90: f32, v91: f32):
 v1 = fcmp uno v90, v91
 v2 = fcmp lt v90, v91
 }
-; sameln: function %fcmp(f32, f32) {
+; sameln: function %fcmp(f32, f32) native {
 ; nextln: ebb0($v90: f32, $v91: f32):
 ; nextln: $v0 = fcmp eq $v90, $v91
 ; nextln: $v1 = fcmp uno $v90, $v91
@@ -105,7 +105,7 @@ ebb0(v90: i32, v91: f32):
 v0 = bitcast.i8x4 v90
 v1 = bitcast.i32 v91
 }
-; sameln: function %bitcast(i32, f32) {
+; sameln: function %bitcast(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: $v0 = bitcast.i8x4 $v90
 ; nextln: $v1 = bitcast.i32 $v91
@@ -124,7 +124,7 @@ ebb0:
 stack_store v1, ss10+2
 stack_store v2, ss2
 }
-; sameln: function %stack() {
+; sameln: function %stack() native {
 ; nextln: $ss10 = spill_slot 8
 ; nextln: $ss2 = local 4
 ; nextln: $ss3 = incoming_arg 4, offset 8
@@ -144,7 +144,7 @@ ebb0(v1: i32):
 v3 = heap_load.f32 v1+12
 heap_store v3, v1
 }
-; sameln: function %heap(i32) {
+; sameln: function %heap(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = heap_load.f32 $v1
 ; nextln: $v3 = heap_load.f32 $v1+12
@@ -164,7 +164,7 @@ ebb0(v1: i32):
 store aligned v3, v1+12
 store notrap aligned v3, v1-12
 }
-; sameln: function %memory(i32) {
+; sameln: function %memory(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = load.i64 $v1
 ; nextln: $v3 = load.i64 aligned $v1
@@ -185,7 +185,7 @@ ebb0(v1: i32):
 regmove v1, %20 -> %10
 return
 }
-; sameln: function %diversion(i32) {
+; sameln: function %diversion(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: regmove $v1, %10 -> %20
 ; nextln: regmove $v1, %20 -> %10
@@ -93,7 +93,7 @@ ebb0(v0: i32):

 ; The same value used as indirect callee and argument.
 function %doubleuse_icall1(i32) {
-sig0 = signature(i32)
+sig0 = (i32) native
 ebb0(v0: i32):
 ; not:copy
 call_indirect sig0, v0(v0)
@@ -102,7 +102,7 @@ ebb0(v0: i32):

 ; The same value used as indirect callee and two arguments.
 function %doubleuse_icall2(i32) {
-sig0 = signature(i32, i32)
+sig0 = (i32, i32) native
 ebb0(v0: i32):
 ; check: $(c=$V) = copy $v0
 call_indirect sig0, v0(v0, v0)
@@ -27,7 +27,7 @@ fn test_reverse_postorder_traversal(function_source: &str, ebb_order: Vec<u32>)
 #[test]
 fn simple_traversal() {
 test_reverse_postorder_traversal("
-function %test(i32) {
+function %test(i32) native {
 ebb0(v0: i32):
 brz v0, ebb1
 jump ebb2
@@ -56,7 +56,7 @@ fn simple_traversal() {
 #[test]
 fn loops_one() {
 test_reverse_postorder_traversal("
-function %test(i32) {
+function %test(i32) native {
 ebb0(v0: i32):
 jump ebb1
 ebb1:
@@ -74,7 +74,7 @@ fn loops_one() {
 #[test]
 fn loops_two() {
 test_reverse_postorder_traversal("
-function %test(i32) {
+function %test(i32) native {
 ebb0(v0: i32):
 brz v0, ebb1
 jump ebb2
@@ -99,7 +99,7 @@ fn loops_two() {
 #[test]
 fn loops_three() {
 test_reverse_postorder_traversal("
-function %test(i32) {
+function %test(i32) native {
 ebb0(v0: i32):
 brz v0, ebb1
 jump ebb2
@@ -129,7 +129,7 @@ fn loops_three() {
 #[test]
 fn back_edge_one() {
 test_reverse_postorder_traversal("
-function %test(i32) {
+function %test(i32) native {
 ebb0(v0: i32):
 brz v0, ebb1
 jump ebb2