Rename CallConv::Native to CallConv::SystemV. (#291)

To keep cross-compiling straightforward, Cretonne shouldn't have any
behavior that depends on the host. This renames the "Native" calling
convention to "SystemV", which has a defined meaning for each target,
so that it's clear that the calling convention doesn't change
depending on what host Cretonne is running on.
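
The rename is visible in two places: the enum variant itself (CallConv::SystemV) and its textual spelling in the IR, "system_v", which is why every filecheck expectation below changes. The Rust sketch that follows illustrates that mapping only; the enum shape, derives, and Display impl are simplifying assumptions for this note, not the actual Cretonne source.

    use std::fmt;

    // Illustrative calling-convention enum; not the actual Cretonne definition.
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    enum CallConv {
        // Defined per target by the System V ABI; it no longer changes with
        // the host that Cretonne happens to run on.
        SystemV,
    }

    impl fmt::Display for CallConv {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            // Textual IR spelling, matching the updated filecheck tests.
            f.write_str(match *self {
                CallConv::SystemV => "system_v",
            })
        }
    }

    fn main() {
        // Prints: function %minimal() system_v {
        println!("function %minimal() {} {{", CallConv::SystemV);
    }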
Authored by Dan Gohman on 2018-03-30 12:32:14 -07:00; committed by GitHub
parent 6606b88136
commit 9e4ab7dc86
44 changed files with 157 additions and 156 deletions

@@ -5,7 +5,7 @@ function %minimal() {
 ebb0:
 trap user0
 }
-; sameln: function %minimal() native {
+; sameln: function %minimal() system_v {
 ; nextln: ebb0:
 ; nextln: trap user0
 ; nextln: }
@@ -18,7 +18,7 @@ ebb0:
 v1 = iconst.i8 6
 v2 = ishl v0, v1
 }
-; sameln: function %ivalues() native {
+; sameln: function %ivalues() system_v {
 ; nextln: ebb0:
 ; nextln: v0 = iconst.i32 2
 ; nextln: v1 = iconst.i8 6
@@ -34,7 +34,7 @@ ebb0:
 v2 = bextend.b32 v1
 v3 = bxor v0, v2
 }
-; sameln: function %bvalues() native {
+; sameln: function %bvalues() system_v {
 ; nextln: ebb0:
 ; nextln: v0 = bconst.b32 true
 ; nextln: v1 = bconst.b8 false
@@ -47,17 +47,17 @@ function %select() {
 ebb0(v90: i32, v91: i32, v92: b1):
 v0 = select v92, v90, v91
 }
-; sameln: function %select() native {
+; sameln: function %select() system_v {
 ; nextln: ebb0(v90: i32, v91: i32, v92: b1):
 ; nextln: v0 = select v92, v90, v91
 ; nextln: }
 ; Polymorphic instruction controlled by third operand.
-function %selectif() native {
+function %selectif() system_v {
 ebb0(v95: i32, v96: i32, v97: b1):
 v98 = selectif.i32 eq v97, v95, v96
 }
-; sameln: function %selectif() native {
+; sameln: function %selectif() system_v {
 ; nextln: ebb0(v95: i32, v96: i32, v97: b1):
 ; nextln: v98 = selectif.i32 eq v97, v95, v96
 ; nextln: }
@@ -69,7 +69,7 @@ ebb0:
 v1 = extractlane v0, 3
 v2 = insertlane v0, 1, v1
 }
-; sameln: function %lanes() native {
+; sameln: function %lanes() system_v {
 ; nextln: ebb0:
 ; nextln: v0 = iconst.i32x4 2
 ; nextln: v1 = extractlane v0, 3
@@ -85,7 +85,7 @@ ebb0(v90: i32, v91: i32):
 v3 = irsub_imm v91, 45
 br_icmp eq v90, v91, ebb0(v91, v90)
 }
-; sameln: function %icmp(i32, i32) native {
+; sameln: function %icmp(i32, i32) system_v {
 ; nextln: ebb0(v90: i32, v91: i32):
 ; nextln: v0 = icmp eq v90, v91
 ; nextln: v1 = icmp ult v90, v91
@@ -101,7 +101,7 @@ ebb0(v90: f32, v91: f32):
 v1 = fcmp uno v90, v91
 v2 = fcmp lt v90, v91
 }
-; sameln: function %fcmp(f32, f32) native {
+; sameln: function %fcmp(f32, f32) system_v {
 ; nextln: ebb0(v90: f32, v91: f32):
 ; nextln: v0 = fcmp eq v90, v91
 ; nextln: v1 = fcmp uno v90, v91
@@ -115,7 +115,7 @@ ebb0(v90: i32, v91: f32):
 v0 = bitcast.i8x4 v90
 v1 = bitcast.i32 v91
 }
-; sameln: function %bitcast(i32, f32) native {
+; sameln: function %bitcast(i32, f32) system_v {
 ; nextln: ebb0(v90: i32, v91: f32):
 ; nextln: v0 = bitcast.i8x4 v90
 ; nextln: v1 = bitcast.i32 v91
@@ -135,7 +135,7 @@ ebb0:
 stack_store v1, ss10+2
 stack_store v2, ss2
 }
-; sameln: function %stack() native {
+; sameln: function %stack() system_v {
 ; check: ss2 = explicit_slot 4
 ; check: ss3 = incoming_arg 4, offset 8
 ; check: ss4 = outgoing_arg 4
@@ -162,7 +162,7 @@ ebb0(v1: i32):
 store aligned v3, v1+12
 store notrap aligned v3, v1-12
 }
-; sameln: function %memory(i32) native {
+; sameln: function %memory(i32) system_v {
 ; nextln: ebb0(v1: i32):
 ; nextln: v2 = load.i64 v1
 ; nextln: v3 = load.i64 aligned v1
@@ -187,7 +187,7 @@ ebb0(v1: i32):
 regfill v1, ss0 -> %10
 return
 }
-; sameln: function %diversion(i32) native {
+; sameln: function %diversion(i32) system_v {
 ; nextln: ss0 = spill_slot 4
 ; check: ebb0(v1: i32):
 ; nextln: regmove v1, %10 -> %20
@@ -204,7 +204,7 @@ ebb0:
 copy_special %20 -> %10
 return
 }
-; sameln: function %copy_special() native {
+; sameln: function %copy_special() system_v {
 ; nextln: ebb0:
 ; nextln: copy_special %10 -> %20
 ; nextln: copy_special %20 -> %10