Rename CallConv::Native to CallConv::SystemV. (#291)

To keep cross-compiling straightforward, Cretonne shouldn't have any
behavior that depends on the host. This renames the "Native" calling
convention to "SystemV", which has a defined meaning for each target,
so that it's clear that the calling convention doesn't change
depending on what host Cretonne is running on.
Author:    Dan Gohman
Date:      2018-03-30 12:32:14 -07:00
Committer: GitHub
Commit:    9e4ab7dc86
Parent:    6606b88136
44 changed files with 157 additions and 156 deletions
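
For illustration, a minimal Rust sketch of the shape of this rename; the derives, the set of variants, and the `to_str` helper below are assumptions for illustration, not the actual Cretonne source. Only the `SystemV` variant and its textual spelling `system_v` are taken from this commit:

    // Sketch only: the real Cretonne enum has more variants and may expose the
    // textual spelling differently (e.g. via a Display impl) than `to_str`.
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    pub enum CallConv {
        /// System V-style convention with a fixed meaning on each target
        /// (previously spelled `Native`, which suggested host-dependent behavior).
        SystemV,
        // ... other calling conventions elided in this sketch.
    }

    impl CallConv {
        /// Spelling used in the textual IL, as seen in the test diffs below.
        pub fn to_str(self) -> &'static str {
            match self {
                CallConv::SystemV => "system_v",
            }
        }
    }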


@@ -109,7 +109,7 @@ ebb1(v10: i32):
return v11
}
-function %gvn_unremovable_phi(i32) native {
+function %gvn_unremovable_phi(i32) system_v {
ebb0(v0: i32):
v2 = iconst.i32 0
jump ebb2(v2, v0)


@@ -5,12 +5,12 @@ isa intel haswell
; Reported as https://github.com/Cretonne/cretonne/issues/207
;
; The coalescer creates a virtual register with two interfering values.
-function %pr207(i64 vmctx, i32, i32) -> i32 native {
+function %pr207(i64 vmctx, i32, i32) -> i32 system_v {
gv0 = vmctx-8
heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
-sig0 = (i64 vmctx, i32, i32) -> i32 native
-sig1 = (i64 vmctx, i32, i32, i32) -> i32 native
-sig2 = (i64 vmctx, i32, i32, i32) -> i32 native
+sig0 = (i64 vmctx, i32, i32) -> i32 system_v
+sig1 = (i64 vmctx, i32, i32, i32) -> i32 system_v
+sig2 = (i64 vmctx, i32, i32, i32) -> i32 system_v
fn0 = sig0 u0:2
fn1 = sig1 u0:0
fn2 = sig2 u0:1
@@ -1034,10 +1034,10 @@ ebb92(v767: i32):
}
; Same problem from musl.wasm.
-function %musl(f64 [%xmm0], i64 vmctx [%rdi]) -> f64 [%xmm0] native {
+function %musl(f64 [%xmm0], i64 vmctx [%rdi]) -> f64 [%xmm0] system_v {
gv0 = vmctx
heap0 = static gv0, min 0, bound 0x0001_0000_0000, guard 0x8000_0000
-sig0 = (f64 [%xmm0], i32 [%rdi], i64 vmctx [%rsi]) -> f64 [%xmm0] native
+sig0 = (f64 [%xmm0], i32 [%rdi], i64 vmctx [%rsi]) -> f64 [%xmm0] system_v
fn0 = sig0 u0:517
ebb0(v0: f64, v1: i64):


@@ -5,7 +5,7 @@ isa intel haswell
; Reported as https://github.com/Cretonne/cretonne/issues/216 from the Binaryen fuzzer.
;
; The (old) coalescer creates a virtual register with two identical values.
-function %pr216(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] native {
+function %pr216(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v {
ebb0(v0: i32, v1: i64):
v3 = iconst.i64 0
v5 = iconst.i32 0


@@ -2,7 +2,7 @@ test regalloc
set is_64bit
isa intel haswell
-function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8]) native {
+function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8]) system_v {
gv0 = vmctx
heap0 = static gv0, min 0, bound 0x0001_0000_0000, guard 0x8000_0000


@@ -9,7 +9,7 @@ isa intel haswell
;
; Test case by binaryen fuzzer!
-function %pr215(i64 vmctx [%rdi]) native {
+function %pr215(i64 vmctx [%rdi]) system_v {
ebb0(v0: i64):
v10 = iconst.i64 0
v1 = bitcast.f64 v10


@@ -2,7 +2,7 @@ test regalloc
set is_64bit=1
isa intel haswell
-function %foo() native {
+function %foo() system_v {
ebb4:
v3 = iconst.i32 0
jump ebb3


@@ -11,7 +11,7 @@ isa intel
; This ended up confusing the constraint solver which had not made a record of
; the fixed register assignment for v9 since it was already in the correct
; register.
-function %pr147(i32) -> i32 native {
+function %pr147(i32) -> i32 system_v {
ebb0(v0: i32):
v1 = iconst.i32 0
v2 = iconst.i32 1


@@ -2,7 +2,7 @@ test regalloc
set is_64bit=1
isa intel haswell
-function %test(i64) -> i64 native {
+function %test(i64) -> i64 system_v {
ebb0(v0: i64):
v2 = iconst.i64 12
; This division clobbers two of its fixed input registers on Intel.


@@ -11,11 +11,11 @@ isa intel haswell
;
; The problem was the reload pass rewriting EBB arguments on "brnz v9, ebb3(v9)"
-function %pr208(i64 vmctx [%rdi]) native {
+function %pr208(i64 vmctx [%rdi]) system_v {
gv0 = vmctx-8
heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
-sig0 = (i64 vmctx [%rdi]) -> i32 [%rax] native
-sig1 = (i64 vmctx [%rdi], i32 [%rsi]) native
+sig0 = (i64 vmctx [%rdi]) -> i32 [%rax] system_v
+sig1 = (i64 vmctx [%rdi], i32 [%rsi]) system_v
fn0 = sig0 u0:1
fn1 = sig1 u0:3


@@ -5,7 +5,7 @@ isa riscv enable_e
; Check that we can handle a function return value that got spilled.
function %spill_return() -> i32 {
-fn0 = function %foo() -> i32 native
+fn0 = function %foo() -> i32 system_v
ebb0:
v0 = call fn0()


@@ -1,7 +1,7 @@
test regalloc
isa intel haswell
-function %pr165() native {
+function %pr165() system_v {
ebb0:
v0 = iconst.i32 0x0102_0304
v1 = iconst.i32 0x1102_0304
@@ -19,7 +19,7 @@ ebb0:
; Same as above, but use so many registers that spilling is required.
; Note: This is also a candidate for using xchg instructions.
-function %emergency_spill() native {
+function %emergency_spill() system_v {
ebb0:
v0 = iconst.i32 0x0102_0304
v1 = iconst.i32 0x1102_0304


@@ -13,7 +13,7 @@ isa intel
;
; The spiller was not releasing register pressure for dead EBB parameters.
-function %pr223(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] native {
+function %pr223(i32 [%rdi], i64 vmctx [%rsi]) -> i64 [%rax] system_v {
ebb0(v0: i32, v1: i64):
v2 = iconst.i32 0
v3 = iconst.i64 0


@@ -93,7 +93,7 @@ ebb0(v0: i32):
; The same value used as indirect callee and argument.
function %doubleuse_icall1(i32) {
-sig0 = (i32) native
+sig0 = (i32) system_v
ebb0(v0: i32):
; not:copy
call_indirect sig0, v0(v0)
@@ -102,7 +102,7 @@ ebb0(v0: i32):
; The same value used as indirect callee and two arguments.
function %doubleuse_icall2(i32) {
-sig0 = (i32, i32) native
+sig0 = (i32, i32) system_v
ebb0(v0: i32):
; check: $(c=$V) = copy v0
call_indirect sig0, v0(v0, v0)