[s390x, abi_impl] Add i128 support (#4598)

This adds full i128 support to the s390x target, including new filetests
and enabling the existing i128 runtest on s390x.

The ABI requires that i128 values are passed and returned via implicit
pointer, but the front end still generates direct i128 types in calls.
This means we have to implement ABI support to implicitly convert i128
values to pointers when passing arguments.
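
For illustration, here is the kind of CLIF the front end emits (the
function names here are hypothetical, not from the testsuite); the ABI
code must spill v0 to a stack buffer and pass the buffer's address in
place of the value:

    function %caller(i128) -> i128 {
        fn0 = %callee(i128) -> i128
    block0(v0: i128):
        v1 = call fn0(v0)
        return v1
    }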

To do so, we add a new variant ABIArg::ImplicitPtrArg.  This acts like
StructArg, except that the value type is the actual target type, not a
pointer type.  The required conversions are inserted in the prologue
and at function call sites.

Note that when dereferencing the implicit pointer in the prologue, we
may require a temporary register: the pointer may itself have been
passed on the stack, so it needs to be loaded first, but the value
register may be in the wrong register class to hold a pointer.  In
this case, we use the "stack limit" register, which should be
available at this point in the prologue.

For return values, we use a mechanism similar to the one used for
supporting multiple return values in the Wasmtime ABI.  The only
difference is that the hidden pointer to the return buffer must be the
*first*, not the last, argument in this case.
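
This can be seen in the %uextend_i64_i128 filetest below: the hidden
return-buffer pointer arrives in %r2 (the first argument register),
the i64 argument in %r3, and the result is stored through the pointer:

    ; vgbm %v5, 0          zero the vector register
    ; vlvgg %v5, %r3, 1    insert the i64 argument as the low doubleword
    ; vst %v5, 0(%r2)      store the i128 result through the hidden pointer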

(This implements the second half of issue #4565.)
Author: Ulrich Weigand
Date: 2022-08-04 22:41:26 +02:00
Commit: b17b1eb25d (parent dc8362ceec), committed via GitHub
46 changed files with 2424 additions and 166 deletions

@@ -1,9 +1,29 @@
test compile precise-output
target s390x
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; UEXTEND
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %uextend_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = uextend.i128 v0
return v1
}
; block0:
; vgbm %v5, 0
; vlvgg %v5, %r3, 1
; vst %v5, 0(%r2)
; br %r14
function %uextend_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = uextend.i128 v0
return v1
}
; block0:
; vgbm %v5, 0
; vlvgf %v5, %r3, 3
; vst %v5, 0(%r2)
; br %r14
function %uextend_i32_i64(i32) -> i64 {
block0(v0: i32):
@@ -15,6 +35,18 @@ block0(v0: i32):
; llgfr %r2, %r2
; br %r14
function %uextend_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = uextend.i128 v0
return v1
}
; block0:
; vgbm %v5, 0
; vlvgh %v5, %r3, 7
; vst %v5, 0(%r2)
; br %r14
function %uextend_i16_i64(i16) -> i64 {
block0(v0: i16):
v1 = uextend.i64 v0
@@ -35,6 +67,18 @@ block0(v0: i16):
; llhr %r2, %r2
; br %r14
function %uextend_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = uextend.i128 v0
return v1
}
; block0:
; vgbm %v5, 0
; vlvgb %v5, %r3, 15
; vst %v5, 0(%r2)
; br %r14
function %uextend_i8_i64(i8) -> i64 {
block0(v0: i8):
v1 = uextend.i64 v0
@@ -65,6 +109,31 @@ block0(v0: i8):
; llcr %r2, %r2
; br %r14
function %sextend_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = sextend.i128 v0
return v1
}
; block0:
; srag %r4, %r3, 63
; vlvgp %v7, %r4, %r3
; vst %v7, 0(%r2)
; br %r14
function %sextend_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = sextend.i128 v0
return v1
}
; block0:
; lgfr %r3, %r3
; srag %r5, %r3, 63
; vlvgp %v17, %r5, %r3
; vst %v17, 0(%r2)
; br %r14
function %sextend_i32_i64(i32) -> i64 {
block0(v0: i32):
v1 = sextend.i64 v0
@@ -75,6 +144,19 @@ block0(v0: i32):
; lgfr %r2, %r2
; br %r14
function %sextend_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = sextend.i128 v0
return v1
}
; block0:
; lghr %r3, %r3
; srag %r5, %r3, 63
; vlvgp %v17, %r5, %r3
; vst %v17, 0(%r2)
; br %r14
function %sextend_i16_i64(i16) -> i64 {
block0(v0: i16):
v1 = sextend.i64 v0
@@ -95,6 +177,19 @@ block0(v0: i16):
; lhr %r2, %r2
; br %r14
function %sextend_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = sextend.i128 v0
return v1
}
; block0:
; lgbr %r3, %r3
; srag %r5, %r3, 63
; vlvgp %v17, %r5, %r3
; vst %v17, 0(%r2)
; br %r14
function %sextend_i8_i64(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
@@ -125,6 +220,50 @@ block0(v0: i8):
; lbr %r2, %r2
; br %r14
function %ireduce_i128_i64(i128) -> i64 {
block0(v0: i128):
v1 = ireduce.i64 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %ireduce_i128_i32(i128) -> i32 {
block0(v0: i128):
v1 = ireduce.i32 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %ireduce_i128_i16(i128) -> i16 {
block0(v0: i128):
v1 = ireduce.i16 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %ireduce_i128_i8(i128) -> i8 {
block0(v0: i128):
v1 = ireduce.i8 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %ireduce_i64_i32(i64, i64) -> i32 {
block0(v0: i64, v1: i64):
v2 = ireduce.i32 v1
@@ -185,6 +324,29 @@ block0(v0: i16, v1: i16):
; lgr %r2, %r3
; br %r14
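;; Booleans wider than b1 are represented as all-zeros or all-ones, so
;; extending to b128 just replicates the (possibly sign-extended) input
;; GPR into both doublewords with vlvgp.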
function %bextend_b64_b128(b64) -> b128 {
block0(v0: b64):
v1 = bextend.b128 v0
return v1
}
; block0:
; vlvgp %v5, %r3, %r3
; vst %v5, 0(%r2)
; br %r14
function %bextend_b32_b128(b32) -> b128 {
block0(v0: b32):
v1 = bextend.b128 v0
return v1
}
; block0:
; lgfr %r3, %r3
; vlvgp %v7, %r3, %r3
; vst %v7, 0(%r2)
; br %r14
function %bextend_b32_b64(b32) -> b64 {
block0(v0: b32):
v1 = bextend.b64 v0
@@ -195,6 +357,18 @@ block0(v0: b32):
; lgfr %r2, %r2
; br %r14
function %bextend_b16_b128(b16) -> b128 {
block0(v0: b16):
v1 = bextend.b128 v0
return v1
}
; block0:
; lghr %r3, %r3
; vlvgp %v7, %r3, %r3
; vst %v7, 0(%r2)
; br %r14
function %bextend_b16_b64(b16) -> b64 {
block0(v0: b16):
v1 = bextend.b64 v0
@@ -215,6 +389,18 @@ block0(v0: b16):
; lhr %r2, %r2
; br %r14
function %bextend_b8_b128(b8) -> b128 {
block0(v0: b8):
v1 = bextend.b128 v0
return v1
}
; block0:
; lgbr %r3, %r3
; vlvgp %v7, %r3, %r3
; vst %v7, 0(%r2)
; br %r14
function %bextend_b8_b64(b8) -> b64 {
block0(v0: b8):
v1 = bextend.b64 v0
@@ -245,6 +431,19 @@ block0(v0: b8):
; lbr %r2, %r2
; br %r14
function %bextend_b1_b128(b1) -> b128 {
block0(v0: b1):
v1 = bextend.b128 v0
return v1
}
; block0:
; sllg %r3, %r3, 63
; srag %r5, %r3, 63
; vlvgp %v17, %r5, %r5
; vst %v17, 0(%r2)
; br %r14
function %bextend_b1_b64(b1) -> b64 {
block0(v0: b1):
v1 = bextend.b64 v0
@@ -289,6 +488,61 @@ block0(v0: b1):
; srak %r2, %r5, 31
; br %r14
function %breduce_b128_b64(b128) -> b64 {
block0(v0: b128):
v1 = breduce.b64 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %breduce_b128_b32(b128) -> b32 {
block0(v0: b128):
v1 = breduce.b32 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %breduce_b128_b16(b128) -> b16 {
block0(v0: b128):
v1 = breduce.b16 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %breduce_b128_b8(b128) -> b8 {
block0(v0: b128):
v1 = breduce.b8 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %breduce_b128_b1(b128) -> b1 {
block0(v0: b128):
v1 = breduce.b1 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %breduce_b64_b32(b64, b64) -> b32 {
block0(v0: b64, v1: b64):
v2 = breduce.b32 v1
@@ -389,6 +643,72 @@ block0(v0: b8, v1: b8):
; lgr %r2, %r3
; br %r14
function %bmask_b128_i128(b128) -> i128 {
block0(v0: b128):
v1 = bmask.i128 v0
return v1
}
; block0:
; vl %v0, 0(%r3)
; vst %v0, 0(%r2)
; br %r14
function %bmask_b128_i64(b128) -> i64 {
block0(v0: b128):
v1 = bmask.i64 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %bmask_b128_i32(b128) -> i32 {
block0(v0: b128):
v1 = bmask.i32 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %bmask_b128_i16(b128) -> i16 {
block0(v0: b128):
v1 = bmask.i16 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %bmask_b128_i8(b128) -> i8 {
block0(v0: b128):
v1 = bmask.i8 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvg %r2, %v0, 1
; br %r14
function %bmask_b64_i128(b64, b64) -> i128 {
block0(v0: b64, v1: b64):
v2 = bmask.i128 v1
return v2
}
; block0:
; vlvgp %v7, %r4, %r4
; vst %v7, 0(%r2)
; br %r14
function %bmask_b64_i64(b64, b64) -> i64 {
block0(v0: b64, v1: b64):
v2 = bmask.i64 v1
@@ -429,6 +749,18 @@ block0(v0: b64, v1: b64):
; lgr %r2, %r3
; br %r14
function %bmask_b32_i128(b32, b32) -> i128 {
block0(v0: b32, v1: b32):
v2 = bmask.i128 v1
return v2
}
; block0:
; lgfr %r5, %r4
; vlvgp %v17, %r5, %r5
; vst %v17, 0(%r2)
; br %r14
function %bmask_b32_i64(b32, b32) -> i64 {
block0(v0: b32, v1: b32):
v2 = bmask.i64 v1
@@ -469,6 +801,18 @@ block0(v0: b32, v1: b32):
; lgr %r2, %r3
; br %r14
function %bmask_b16_i128(b16, b16) -> i128 {
block0(v0: b16, v1: b16):
v2 = bmask.i128 v1
return v2
}
; block0:
; lghr %r5, %r4
; vlvgp %v17, %r5, %r5
; vst %v17, 0(%r2)
; br %r14
function %bmask_b16_i64(b16, b16) -> i64 {
block0(v0: b16, v1: b16):
v2 = bmask.i64 v1
@@ -509,6 +853,18 @@ block0(v0: b16, v1: b16):
; lgr %r2, %r3
; br %r14
function %bmask_b8_i128(b8, b8) -> i128 {
block0(v0: b8, v1: b8):
v2 = bmask.i128 v1
return v2
}
; block0:
; lgbr %r5, %r4
; vlvgp %v17, %r5, %r5
; vst %v17, 0(%r2)
; br %r14
function %bmask_b8_i64(b8, b8) -> i64 {
block0(v0: b8, v1: b8):
v2 = bmask.i64 v1
@@ -549,6 +905,19 @@ block0(v0: b8, v1: b8):
; lgr %r2, %r3
; br %r14
function %bmask_b1_i128(b1, b1) -> i128 {
block0(v0: b1, v1: b1):
v2 = bmask.i128 v1
return v2
}
; block0:
; sllg %r5, %r4, 63
; srag %r3, %r5, 63
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14
function %bmask_b1_i64(b1, b1) -> i64 {
block0(v0: b1, v1: b1):
v2 = bmask.i64 v1
@@ -593,6 +962,80 @@ block0(v0: b1, v1: b1):
; srak %r2, %r3, 31
; br %r14
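;; bint of a b128 masks the all-ones boolean down to 0 or 1.  The
;; 128-bit constant 1 has no immediate form, so it is materialized from
;; an inline literal pool: bras branches over the 16-byte constant and
;; leaves its address in %r1 for the following vl.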
function %bint_b128_i128(b128) -> i128 {
block0(v0: b128):
v1 = bint.i128 v0
return v1
}
; block0:
; vl %v0, 0(%r3)
; bras %r1, 20 ; data.u128 0x00000000000000000000000000000001 ; vl %v5, 0(%r1)
; vn %v7, %v0, %v5
; vst %v7, 0(%r2)
; br %r14
function %bint_b128_i64(b128) -> i64 {
block0(v0: b128):
v1 = bint.i64 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvb %r2, %v0, 15
; nill %r2, 1
; br %r14
function %bint_b128_i32(b128) -> i32 {
block0(v0: b128):
v1 = bint.i32 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvb %r2, %v0, 15
; nill %r2, 1
; br %r14
function %bint_b128_i16(b128) -> i16 {
block0(v0: b128):
v1 = bint.i16 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvb %r2, %v0, 15
; nill %r2, 1
; br %r14
function %bint_b128_i8(b128) -> i8 {
block0(v0: b128):
v1 = bint.i8 v0
return v1
}
; block0:
; vl %v0, 0(%r2)
; vlgvb %r2, %v0, 15
; nill %r2, 1
; br %r14
function %bint_b64_i128(b64) -> i128 {
block0(v0: b64):
v1 = bint.i128 v0
return v1
}
; block0:
; nill %r3, 1
; vgbm %v16, 0
; vlvgb %v16, %r3, 15
; vst %v16, 0(%r2)
; br %r14
function %bint_b64_i64(b64) -> i64 {
block0(v0: b64):
v1 = bint.i64 v0
@@ -634,6 +1077,19 @@ block0(v0: b64):
; nill %r2, 1
; br %r14
function %bint_b32_i128(b32) -> i128 {
block0(v0: b32):
v1 = bint.i128 v0
return v1
}
; block0:
; nill %r3, 1
; vgbm %v16, 0
; vlvgb %v16, %r3, 15
; vst %v16, 0(%r2)
; br %r14
function %bint_b32_i64(b32) -> i64 {
block0(v0: b32):
v1 = bint.i64 v0
@@ -675,6 +1131,19 @@ block0(v0: b32):
; nill %r2, 1
; br %r14
function %bint_b16_i128(b16) -> i128 {
block0(v0: b16):
v1 = bint.i128 v0
return v1
}
; block0:
; nill %r3, 1
; vgbm %v16, 0
; vlvgb %v16, %r3, 15
; vst %v16, 0(%r2)
; br %r14
function %bint_b16_i64(b16) -> i64 {
block0(v0: b16):
v1 = bint.i64 v0
@@ -716,6 +1185,19 @@ block0(v0: b16):
; nill %r2, 1
; br %r14
function %bint_b8_i128(b8) -> i128 {
block0(v0: b8):
v1 = bint.i128 v0
return v1
}
; block0:
; nill %r3, 1
; vgbm %v16, 0
; vlvgb %v16, %r3, 15
; vst %v16, 0(%r2)
; br %r14
function %bint_b8_i64(b8) -> i64 {
block0(v0: b8):
v1 = bint.i64 v0
@@ -757,6 +1239,19 @@ block0(v0: b8):
; nill %r2, 1
; br %r14
function %bint_b1_i128(b1) -> i128 {
block0(v0: b1):
v1 = bint.i128 v0
return v1
}
; block0:
; nill %r3, 1
; vgbm %v16, 0
; vlvgb %v16, %r3, 15
; vst %v16, 0(%r2)
; br %r14
function %bint_b1_i64(b1) -> i64 {
block0(v0: b1):
v1 = bint.i64 v0