Add a calling convention to all function signatures.

A CallConv enum on every function signature makes it possible to
generate calls to functions with different calling conventions within
the same ISA, and even within a single function.

The calling conventions also serve as a way of customizing Cretonne's
behavior when embedded inside a VM. As an example, the SpiderWASM
calling convention is used to compile WebAssembly functions that run
inside the SpiderMonkey virtual machine.
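
As a rough sketch of the Rust side (the names and field layout here are
illustrative assumptions, not necessarily the exact items in the crate),
the calling convention is an enum carried by every signature:

    // Sketch only: names and fields are assumptions for illustration.
    /// Stub standing in for Cretonne's argument/return type descriptor.
    pub struct ArgumentType;

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    pub enum CallConv {
        /// The platform's default C-like calling convention.
        Native,
        /// WebAssembly functions compiled for SpiderMonkey.
        SpiderWASM,
    }

    pub struct Signature {
        pub argument_types: Vec<ArgumentType>,
        pub return_types: Vec<ArgumentType>,
        /// New: every signature names its calling convention.
        pub call_conv: CallConv,
    }

Carrying the convention on the signature, rather than on the ISA, is
what allows a single function to mix calls to native and spiderwasm
callees.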

All function signatures must have a calling convention at the end, so
this changes the textual IL syntax.

Before:

    sig1 = signature(i32, f64) -> f64

After:

    sig1 = (i32, f64) -> f64 native
    sig2 = (i32) spiderwasm

When printing functions, the calling convention goes after the return types:

    function %r1() -> i32, f32 spiderwasm {
    ebb1:
        ...
    }
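
One way to get that output (a sketch building on the CallConv enum
above; the real printer may differ) is to give the calling convention a
Display impl and have the signature printer append it last:

    use std::fmt;

    // Sketch building on the CallConv enum above, not the crate's real printer.
    impl fmt::Display for CallConv {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.write_str(match *self {
                CallConv::Native => "native",
                CallConv::SpiderWASM => "spiderwasm",
            })
        }
    }

    // The signature printer then emits the convention after the return types,
    // roughly: write!(f, "({}){} {}", args, rets, sig.call_conv)
    // so "() -> i32, f32" is followed by "spiderwasm".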

In the parser, the calling convention is optional and defaults to
"native", mostly to avoid updating all the existing test cases under
filetests/. When printing a function, the calling convention is always
written out, even for "native" functions.
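
The parser change can be sketched like this (a hypothetical helper with
simplified token handling; the real parser's API differs), treating the
trailing identifier as optional and falling back to native:

    // Hypothetical helper: peek at the token that follows the return types.
    fn parse_optional_call_conv<'a, I>(tokens: &mut std::iter::Peekable<I>) -> CallConv
    where
        I: Iterator<Item = &'a str>,
    {
        match tokens.peek() {
            Some(&"native") => { tokens.next(); CallConv::Native }
            Some(&"spiderwasm") => { tokens.next(); CallConv::SpiderWASM }
            // No calling convention given: default to native so the existing
            // tests under filetests/ keep parsing unchanged.
            _ => CallConv::Native,
        }
    }
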
Jakob Stoklund Olesen
2017-08-02 16:40:35 -07:00
committed by Jakob Stoklund Olesen
parent bf1820587c
commit 7f3b807597
27 changed files with 211 additions and 144 deletions

@@ -5,7 +5,7 @@ function %minimal() {
 ebb0:
 trap
 }
-; sameln: function %minimal() {
+; sameln: function %minimal() native {
 ; nextln: ebb0:
 ; nextln: trap
 ; nextln: }
@@ -18,7 +18,7 @@ ebb0:
 v1 = iconst.i8 6
 v2 = ishl v0, v1
 }
-; sameln: function %ivalues() {
+; sameln: function %ivalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32 2
 ; nextln: $v1 = iconst.i8 6
@@ -34,7 +34,7 @@ ebb0:
 v2 = bextend.b32 v1
 v3 = bxor v0, v2
 }
-; sameln: function %bvalues() {
+; sameln: function %bvalues() native {
 ; nextln: ebb0:
 ; nextln: $v0 = bconst.b32 true
 ; nextln: $v1 = bconst.b8 false
@@ -47,7 +47,7 @@ function %select() {
 ebb0(v90: i32, v91: i32, v92: b1):
 v0 = select v92, v90, v91
 }
-; sameln: function %select() {
+; sameln: function %select() native {
 ; nextln: ebb0($v90: i32, $v91: i32, $v92: b1):
 ; nextln: $v0 = select $v92, $v90, $v91
 ; nextln: }
@@ -59,7 +59,7 @@ ebb0:
 v1 = extractlane v0, 3
 v2 = insertlane v0, 1, v1
 }
-; sameln: function %lanes() {
+; sameln: function %lanes() native {
 ; nextln: ebb0:
 ; nextln: $v0 = iconst.i32x4 2
 ; nextln: $v1 = extractlane $v0, 3
@@ -75,7 +75,7 @@ ebb0(v90: i32, v91: i32):
 v3 = irsub_imm v91, 45
 br_icmp eq v90, v91, ebb0(v91, v90)
 }
-; sameln: function %icmp(i32, i32) {
+; sameln: function %icmp(i32, i32) native {
 ; nextln: ebb0($v90: i32, $v91: i32):
 ; nextln: $v0 = icmp eq $v90, $v91
 ; nextln: $v1 = icmp ult $v90, $v91
@@ -91,7 +91,7 @@ ebb0(v90: f32, v91: f32):
 v1 = fcmp uno v90, v91
 v2 = fcmp lt v90, v91
 }
-; sameln: function %fcmp(f32, f32) {
+; sameln: function %fcmp(f32, f32) native {
 ; nextln: ebb0($v90: f32, $v91: f32):
 ; nextln: $v0 = fcmp eq $v90, $v91
 ; nextln: $v1 = fcmp uno $v90, $v91
@@ -105,7 +105,7 @@ ebb0(v90: i32, v91: f32):
 v0 = bitcast.i8x4 v90
 v1 = bitcast.i32 v91
 }
-; sameln: function %bitcast(i32, f32) {
+; sameln: function %bitcast(i32, f32) native {
 ; nextln: ebb0($v90: i32, $v91: f32):
 ; nextln: $v0 = bitcast.i8x4 $v90
 ; nextln: $v1 = bitcast.i32 $v91
@@ -124,7 +124,7 @@ ebb0:
 stack_store v1, ss10+2
 stack_store v2, ss2
 }
-; sameln: function %stack() {
+; sameln: function %stack() native {
 ; nextln: $ss10 = spill_slot 8
 ; nextln: $ss2 = local 4
 ; nextln: $ss3 = incoming_arg 4, offset 8
@@ -144,7 +144,7 @@ ebb0(v1: i32):
 v3 = heap_load.f32 v1+12
 heap_store v3, v1
 }
-; sameln: function %heap(i32) {
+; sameln: function %heap(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = heap_load.f32 $v1
 ; nextln: $v3 = heap_load.f32 $v1+12
@@ -164,7 +164,7 @@ ebb0(v1: i32):
 store aligned v3, v1+12
 store notrap aligned v3, v1-12
 }
-; sameln: function %memory(i32) {
+; sameln: function %memory(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: $v2 = load.i64 $v1
 ; nextln: $v3 = load.i64 aligned $v1
@@ -185,7 +185,7 @@ ebb0(v1: i32):
 regmove v1, %20 -> %10
 return
 }
-; sameln: function %diversion(i32) {
+; sameln: function %diversion(i32) native {
 ; nextln: ebb0($v1: i32):
 ; nextln: regmove $v1, %10 -> %20
 ; nextln: regmove $v1, %20 -> %10