Add a calling convention to all function signatures.
A CallConv enum on every function signature makes it possible to
generate calls to functions with different calling conventions within
the same ISA, and even within a single function.
The calling conventions also serve as a way of customizing Cretonne's
behavior when embedded inside a VM. As an example, the SpiderWASM
calling convention is used to compile WebAssembly functions that run
inside the SpiderMonkey virtual machine.
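For reference, a minimal sketch of what such an enum could look like in
Rust; the variant names, derives, and Display strings below are inferred
from the textual IL in this change ("native", "spiderwasm"), not copied
from the actual source:

    /// Calling convention attached to every function signature (sketch only).
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    pub enum CallConv {
        /// Default calling convention of the host platform.
        Native,
        /// Convention for WebAssembly functions running inside SpiderMonkey.
        SpiderWASM,
    }

    impl std::fmt::Display for CallConv {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            // The strings match the textual IL syntax shown below.
            f.write_str(match *self {
                CallConv::Native => "native",
                CallConv::SpiderWASM => "spiderwasm",
            })
        }
    }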
All function signatures must have a calling convention at the end, so
this changes the textual IL syntax.
Before:
sig1 = signature(i32, f64) -> f64
After:
sig1 = (i32, f64) -> f64 native
sig2 = (i32) spiderwasm
When printing functions, the calling convention goes after the return types:
function %r1() -> i32, f32 spiderwasm {
ebb1:
...
}
In the parser, this calling convention is optional and defaults to
"native". This is mostly to avoid updating all the existing test cases
under filetests/. When printing a function, the calling convention is
always included, even for "native" functions.
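To illustrate the default rule (a sketch only; parse_call_conv is a
hypothetical helper using the CallConv enum sketched above, not the real
parser entry point):

    // An omitted calling convention defaults to "native"; unknown names are errors.
    fn parse_call_conv(token: Option<&str>) -> Result<CallConv, String> {
        match token {
            None => Ok(CallConv::Native),
            Some("native") => Ok(CallConv::Native),
            Some("spiderwasm") => Ok(CallConv::SpiderWASM),
            Some(other) => Err(format!("unknown calling convention: {}", other)),
        }
    }

The printer, by contrast, would always emit the Display form of the
calling convention, so a printed function parses back to the same
signature.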
committed by Jakob Stoklund Olesen
parent bf1820587c
commit 7f3b807597
@@ -9,7 +9,7 @@ ebb0:
 ebb1:
 jump ebb0()
 }
-; sameln: function %minimal() {
+; sameln: function %minimal() native {
 ; nextln: ebb0:
 ; nextln: jump ebb1
 ; nextln:
@@ -25,7 +25,7 @@ ebb0(v90: i32):
 ebb1(v91: i32):
 jump ebb0(v91)
 }
-; sameln: function %onearg(i32) {
+; sameln: function %onearg(i32) native {
 ; nextln: ebb0($v90: i32):
 ; nextln: jump ebb1($v90)
 ; nextln:
@@ -41,7 +41,7 @@ ebb0(v90: i32, v91: f32):
 ebb1(v92: i32, v93: f32):
 jump ebb0(v92, v93)
 }
-; sameln: function %twoargs(i32, f32) {
+; sameln: function %twoargs(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: jump ebb1($v90, $v91)
 ; nextln:
@@ -57,7 +57,7 @@ ebb0(v90: i32):
 ebb1:
 brnz v90, ebb1()
 }
-; sameln: function %minimal(i32) {
+; sameln: function %minimal(i32) native {
 ; nextln: ebb0($v90: i32):
 ; nextln: brz $v90, ebb1
 ; nextln:
@@ -72,7 +72,7 @@ ebb0(v90: i32, v91: f32):
 ebb1(v92: i32, v93: f32):
 brnz v90, ebb0(v92, v93)
 }
-; sameln: function %twoargs(i32, f32) {
+; sameln: function %twoargs(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: brz $v90, ebb1($v90, $v91)
 ; nextln:
@@ -94,7 +94,7 @@ ebb30:
 ebb40:
 trap
 }
-; sameln: function %jumptable(i32) {
+; sameln: function %jumptable(i32) native {
 ; nextln: jt0 = jump_table 0
 ; nextln: jt1 = jump_table 0, 0, ebb0, ebb3, ebb1, ebb2
 ; nextln:
@@ -5,18 +5,18 @@ function %mini() {
 ebb1:
 return
 }
-; sameln: function %mini() {
+; sameln: function %mini() native {
 ; nextln: ebb0:
 ; nextln: return
 ; nextln: }

-function %r1() -> i32, f32 {
+function %r1() -> i32, f32 spiderwasm {
 ebb1:
 v1 = iconst.i32 3
 v2 = f32const 0.0
 return v1, v2
 }
-; sameln: function %r1() -> i32, f32 {
+; sameln: function %r1() -> i32, f32 spiderwasm {
 ; nextln: ebb0:
 ; nextln: $v1 = iconst.i32 3
 ; nextln: $v2 = f32const 0.0
@@ -24,15 +24,15 @@ ebb1:
 ; nextln: }

 function %signatures() {
-sig10 = signature()
-sig11 = signature(i32, f64) -> i32, b1
+sig10 = ()
+sig11 = (i32, f64) -> i32, b1 spiderwasm
 fn5 = sig11 %foo
 fn8 = function %bar(i32) -> b1
 }
-; sameln: function %signatures() {
-; nextln: $sig10 = signature()
-; nextln: $sig11 = signature(i32, f64) -> i32, b1
-; nextln: sig2 = signature(i32) -> b1
+; sameln: function %signatures() native {
+; nextln: $sig10 = () native
+; nextln: $sig11 = (i32, f64) -> i32, b1 spiderwasm
+; nextln: sig2 = (i32) -> b1 native
 ; nextln: $fn5 = $sig11 %foo
 ; nextln: $fn8 = sig2 %bar
 ; nextln: }
@@ -54,9 +54,9 @@ ebb0:
 ; check: return

 function %indirect(i64) {
-sig0 = signature(i64)
-sig1 = signature() -> i32
-sig2 = signature() -> i32, f32
+sig0 = (i64)
+sig1 = () -> i32
+sig2 = () -> i32, f32

 ebb0(v0: i64):
 v1 = call_indirect sig1, v0()
@@ -74,7 +74,7 @@ function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32
 ebb0(v1: i32, v2: i32, v3: i32, v4: i32):
 return v4, v2, v3, v1
 }
-; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret {
+; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret native {
 ; check: ebb0($v1: i32, $v2: i32, $v3: i32, $v4: i32):
 ; check: return $v4, $v2, $v3, $v1
 ; check: }
@@ -13,7 +13,7 @@ ebb1(v0: i32, v1: i32):
 v9 = iadd v8, v7
 [Iret#5] return v0, v8
 }
-; sameln: function %foo(i32, i32) {
+; sameln: function %foo(i32, i32) native {
 ; nextln: $ebb1($v0: i32, $v1: i32):
 ; nextln: [-,-]$WS $v2 = iadd $v0, $v1
 ; nextln: [-]$WS trap
@@ -2,4 +2,4 @@ test cat

 ; 'function' is not a keyword, and can be used as the name of a function too.
 function %function() {}
-; check: function %function()
+; check: function %function() native
@@ -15,7 +15,7 @@ ebb100(v20: i32):
 v9200 = f64const 0x4.0p0
 trap
 }
-; sameln: function %defs() {
+; sameln: function %defs() native {
 ; nextln: $ebb100($v20: i32):
 ; nextln: $v1000 = iconst.i32x8 5
 ; nextln: $v9200 = f64const 0x1.0000000000000p2
@@ -29,7 +29,7 @@ ebb100(v20: i32):
 v200 = iadd v20, v1000
 jump ebb100(v1000)
 }
-; sameln: function %use_value() {
+; sameln: function %use_value() native {
 ; nextln: ebb0($v20: i32):
 ; nextln: $v1000 = iadd_imm $v20, 5
 ; nextln: $v200 = iadd $v20, $v1000
@@ -5,7 +5,7 @@ function %minimal() {
 ebb0:
 trap
 }
-; sameln: function %minimal() {
+; sameln: function %minimal() native {
 ; nextln: ebb0:
 ; nextln: trap
 ; nextln: }
@@ -18,7 +18,7 @@ ebb0:
 v1 = iconst.i8 6
 v2 = ishl v0, v1
 }
-; sameln: function %ivalues() {
+; sameln: function %ivalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32 2
 ; nextln: $v1 = iconst.i8 6
@@ -34,7 +34,7 @@ ebb0:
 v2 = bextend.b32 v1
 v3 = bxor v0, v2
 }
-; sameln: function %bvalues() {
+; sameln: function %bvalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = bconst.b32 true
 ; nextln: $v1 = bconst.b8 false
@@ -47,7 +47,7 @@ function %select() {
 ebb0(v90: i32, v91: i32, v92: b1):
 v0 = select v92, v90, v91
 }
-; sameln: function %select() {
+; sameln: function %select() native {
 ; nextln: ebb0($v90: i32, $v91: i32, $v92: b1):
 ; nextln: $v0 = select $v92, $v90, $v91
 ; nextln: }
@@ -59,7 +59,7 @@ ebb0:
 v1 = extractlane v0, 3
 v2 = insertlane v0, 1, v1
 }
-; sameln: function %lanes() {
+; sameln: function %lanes() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32x4 2
 ; nextln: $v1 = extractlane $v0, 3
@@ -75,7 +75,7 @@ ebb0(v90: i32, v91: i32):
 v3 = irsub_imm v91, 45
 br_icmp eq v90, v91, ebb0(v91, v90)
 }
-; sameln: function %icmp(i32, i32) {
+; sameln: function %icmp(i32, i32) native {
 ; nextln: ebb0($v90: i32, $v91: i32):
 ; nextln: $v0 = icmp eq $v90, $v91
 ; nextln: $v1 = icmp ult $v90, $v91
@@ -91,7 +91,7 @@ ebb0(v90: f32, v91: f32):
 v1 = fcmp uno v90, v91
 v2 = fcmp lt v90, v91
 }
-; sameln: function %fcmp(f32, f32) {
+; sameln: function %fcmp(f32, f32) native {
 ; nextln: ebb0($v90: f32, $v91: f32):
 ; nextln: $v0 = fcmp eq $v90, $v91
 ; nextln: $v1 = fcmp uno $v90, $v91
@@ -105,7 +105,7 @@ ebb0(v90: i32, v91: f32):
 v0 = bitcast.i8x4 v90
 v1 = bitcast.i32 v91
 }
-; sameln: function %bitcast(i32, f32) {
+; sameln: function %bitcast(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: $v0 = bitcast.i8x4 $v90
 ; nextln: $v1 = bitcast.i32 $v91
@@ -124,7 +124,7 @@ ebb0:
 stack_store v1, ss10+2
 stack_store v2, ss2
 }
-; sameln: function %stack() {
+; sameln: function %stack() native {
 ; nextln: $ss10 = spill_slot 8
 ; nextln: $ss2 = local 4
 ; nextln: $ss3 = incoming_arg 4, offset 8
@@ -144,7 +144,7 @@ ebb0(v1: i32):
 v3 = heap_load.f32 v1+12
 heap_store v3, v1
 }
-; sameln: function %heap(i32) {
+; sameln: function %heap(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = heap_load.f32 $v1
 ; nextln: $v3 = heap_load.f32 $v1+12
@@ -164,7 +164,7 @@ ebb0(v1: i32):
 store aligned v3, v1+12
 store notrap aligned v3, v1-12
 }
-; sameln: function %memory(i32) {
+; sameln: function %memory(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = load.i64 $v1
 ; nextln: $v3 = load.i64 aligned $v1
@@ -185,7 +185,7 @@ ebb0(v1: i32):
 regmove v1, %20 -> %10
 return
 }
-; sameln: function %diversion(i32) {
+; sameln: function %diversion(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: regmove $v1, %10 -> %20
 ; nextln: regmove $v1, %20 -> %10