s390x: Enable more runtests, and fix a few bugs (#4516)

This enables more runtests to be executed on s390x.  Doing so
uncovered two back-end bugs, which are fixed as well:

- The result of cls was always off by one.
- The result of popcnt.i16 had uninitialized high bits.

In addition, I found a bug in the load-op-store.clif test case:
     v3 = heap_addr.i64 heap0, v1, 4
     v4 = iconst.i64 42
     store.i32 v4, v3
This was clearly intended to perform a 32-bit store, but
actually performs a 64-bit store (it seems the type annotation
of the store opcode is ignored, and the type of the operand
is used instead).  That bug did not show any noticeable symptoms
on little-endian architectures, but broke on big-endian.
This commit is contained in:
Ulrich Weigand
2022-07-25 21:37:06 +02:00
committed by GitHub
parent 7c67e620c4
commit dd40bf075a
17 changed files with 42 additions and 16 deletions

View File

@@ -1149,17 +1149,26 @@
;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; The result of cls is not supposed to count the sign bit itself, just
;; additional copies of it. Therefore, when computing cls in terms of clz,
;; we need to subtract one. Fold this into the offset computation.
(decl cls_offset (Type Reg) Reg)
(rule (cls_offset $I8 x) (add_simm16 $I8 x -57))
(rule (cls_offset $I16 x) (add_simm16 $I16 x -49))
(rule (cls_offset $I32 x) (add_simm16 $I32 x -33))
(rule (cls_offset $I64 x) (add_simm16 $I64 x -1))
;; Count leading sign-bit copies. We don't have any instruction for that, ;; Count leading sign-bit copies. We don't have any instruction for that,
;; so we instead count the leading zeros after inverting the input if negative, ;; so we instead count the leading zeros after inverting the input if negative,
;; i.e. computing ;; i.e. computing
;; cls(x) == clz(x ^ (x >> 63)) ;; cls(x) == clz(x ^ (x >> 63)) - 1
;; where x is the sign-extended input. ;; where x is the sign-extended input.
(rule (lower (has_type (fits_in_64 ty) (cls x))) (rule (lower (has_type (fits_in_64 ty) (cls x)))
(let ((ext_reg Reg (put_in_reg_sext64 x)) (let ((ext_reg Reg (put_in_reg_sext64 x))
(signbit_copies Reg (ashr_imm $I64 ext_reg 63)) (signbit_copies Reg (ashr_imm $I64 ext_reg 63))
(inv_reg Reg (xor_reg $I64 ext_reg signbit_copies)) (inv_reg Reg (xor_reg $I64 ext_reg signbit_copies))
(clz RegPair (clz_reg 64 inv_reg))) (clz RegPair (clz_reg 64 inv_reg)))
(clz_offset ty (regpair_hi clz)))) (cls_offset ty (regpair_hi clz))))
;;;; Rules for `ctz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Rules for `ctz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -1214,12 +1223,14 @@
;; of each input byte separately, so we need to accumulate those partial ;; of each input byte separately, so we need to accumulate those partial
;; results via a series of log2(type size in bytes) - 1 additions. We ;; results via a series of log2(type size in bytes) - 1 additions. We
;; accumulate in the high byte, so that a final right shift will zero out ;; accumulate in the high byte, so that a final right shift will zero out
;; any unrelated bits to give a clean result. ;; any unrelated bits to give a clean result. (This does not work with
;; $I16, where we instead accumulate in the low byte and clear high bits
;; via an explicit and operation.)
(rule (lower (has_type (and (mie2_disabled) $I16) (popcnt x))) (rule (lower (has_type (and (mie2_disabled) $I16) (popcnt x)))
(let ((cnt2 Reg (popcnt_byte x)) (let ((cnt2 Reg (popcnt_byte x))
(cnt1 Reg (add_reg $I32 cnt2 (lshl_imm $I32 cnt2 8)))) (cnt1 Reg (add_reg $I32 cnt2 (lshr_imm $I32 cnt2 8))))
(lshr_imm $I32 cnt1 8))) (and_uimm16shifted $I32 cnt1 (uimm16shifted 255 0))))
(rule (lower (has_type (and (mie2_disabled) $I32) (popcnt x))) (rule (lower (has_type (and (mie2_disabled) $I32) (popcnt x)))
(let ((cnt4 Reg (popcnt_byte x)) (let ((cnt4 Reg (popcnt_byte x))

View File

@@ -93,7 +93,7 @@ block0(v0: i64):
; srag %r5, %r2, 63 ; srag %r5, %r2, 63
; xgrk %r3, %r2, %r5 ; xgrk %r3, %r2, %r5
; flogr %r0, %r3 ; flogr %r0, %r3
; lgr %r2, %r0 ; aghik %r2, %r0, -1
; br %r14 ; br %r14
function %cls_i32(i32) -> i32 { function %cls_i32(i32) -> i32 {
@@ -107,7 +107,7 @@ block0(v0: i32):
; srag %r3, %r5, 63 ; srag %r3, %r5, 63
; xgr %r5, %r3 ; xgr %r5, %r3
; flogr %r0, %r5 ; flogr %r0, %r5
; ahik %r2, %r0, -32 ; ahik %r2, %r0, -33
; br %r14 ; br %r14
function %cls_i16(i16) -> i16 { function %cls_i16(i16) -> i16 {
@@ -121,7 +121,7 @@ block0(v0: i16):
; srag %r3, %r5, 63 ; srag %r3, %r5, 63
; xgr %r5, %r3 ; xgr %r5, %r3
; flogr %r0, %r5 ; flogr %r0, %r5
; ahik %r2, %r0, -48 ; ahik %r2, %r0, -49
; br %r14 ; br %r14
function %cls_i8(i8) -> i8 { function %cls_i8(i8) -> i8 {
@@ -135,7 +135,7 @@ block0(v0: i8):
; srag %r3, %r5, 63 ; srag %r3, %r5, 63
; xgr %r5, %r3 ; xgr %r5, %r3
; flogr %r0, %r5 ; flogr %r0, %r5
; ahik %r2, %r0, -56 ; ahik %r2, %r0, -57
; br %r14 ; br %r14
function %ctz_i64(i64) -> i64 { function %ctz_i64(i64) -> i64 {
@@ -238,9 +238,9 @@ block0(v0: i16):
; block0: ; block0:
; popcnt %r5, %r2 ; popcnt %r5, %r2
; sllk %r3, %r5, 8 ; srlk %r3, %r5, 8
; ar %r5, %r3 ; ark %r2, %r5, %r3
; srlk %r2, %r5, 8 ; nill %r2, 255
; br %r14 ; br %r14
function %popcnt_i8(i8) -> i8 { function %popcnt_i8(i8) -> i8 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
function %bint_b1_i8_true() -> i8 { function %bint_b1_i8_true() -> i8 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
; not implemented on `x86_64` ; not implemented on `x86_64`
function %cls_i8(i8) -> i8 { function %cls_i8(i8) -> i8 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
target x86_64 has_lzcnt target x86_64 has_lzcnt

View File

@@ -1,6 +1,7 @@
test run test run
target x86_64 target x86_64
target s390x
target aarch64 target aarch64
function %fpromote_f32_f64(i64 vmctx, i64, f32) -> f64 { function %fpromote_f32_f64(i64 vmctx, i64, f32) -> f64 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
target x86_64 has_bmi1 target x86_64 has_bmi1

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
; x86_64 only supports vector iabs ; x86_64 only supports vector iabs
function %iabs_i8(i8) -> i8 { function %iabs_i8(i8) -> i8 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
function %icmp_ugt_i8(i8, i8) -> b1 { function %icmp_ugt_i8(i8, i8) -> b1 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
function %ireduce_i16_i8(i16) -> i8 { function %ireduce_i16_i8(i16) -> i8 {

View File

@@ -1,5 +1,6 @@
test run test run
target x86_64 target x86_64
target s390x
target aarch64 target aarch64
function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 { function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
@@ -28,7 +29,7 @@ function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32): block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 4 v3 = heap_addr.i64 heap0, v1, 4
v4 = iconst.i64 42 v4 = iconst.i32 42
store.i32 v4, v3 store.i32 v4, v3
v5 = load.i32 v3 v5 = load.i32 v3
v6 = iadd.i32 v5, v2 v6 = iadd.i32 v5, v2

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
target x86_64 target x86_64
target x86_64 has_popcnt target x86_64 has_popcnt

View File

@@ -1,5 +1,6 @@
test interpret test interpret
test run test run
target s390x
target x86_64 target x86_64
function %select_eq_f32(f32, f32) -> i32 { function %select_eq_f32(f32, f32) -> i32 {

View File

@@ -1,5 +1,6 @@
test run test run
target aarch64 target aarch64
target s390x
; i8 and i16 are invalid source sizes for x86_64 ; i8 and i16 are invalid source sizes for x86_64
function %scalartovector_i8(i8) -> i8x16 { function %scalartovector_i8(i8) -> i8x16 {

View File

@@ -1,5 +1,6 @@
test run test run
target aarch64 target aarch64
target s390x
set enable_simd set enable_simd
target x86_64 has_sse3 has_ssse3 has_sse41 target x86_64 has_sse3 has_ssse3 has_sse41

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
; x86_64 backend only supports `i16`, `i32`, and `i64` types. ; x86_64 backend only supports `i16`, `i32`, and `i64` types.
function %smulhi_i8(i8, i8) -> i8 { function %smulhi_i8(i8, i8) -> i8 {

View File

@@ -1,6 +1,7 @@
test interpret test interpret
test run test run
target aarch64 target aarch64
target s390x
set enable_simd set enable_simd
target x86_64 has_sse3 has_ssse3 has_sse41 target x86_64 has_sse3 has_ssse3 has_sse41