s390x: Add support for all remaining atomic operations (#3746)

This adds support for all atomic operations that were unimplemented
so far in the s390x back end:
- atomic_rmw operations xchg, nand, smin, smax, umin, umax
- $I8 and $I16 versions of atomic_rmw and atomic_cas
- little endian versions of atomic_rmw and atomic_cas

All of these have to be implemented via a compare-and-swap loop,
and for the $I8 and $I16 versions the actual atomic instruction
needs to operate on the surrounding aligned 32-bit word.
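
For illustration, the address arithmetic the subword loops rely on
can be sketched as follows (a minimal Rust sketch, not the actual
back-end code; it mirrors the sllk/nill pairs visible in the
filetests below):

    // Split a byte address into the surrounding aligned 32-bit word
    // and the bit offset of the subword within it.  The RLL rotate
    // amounts used inside the loop are derived from this offset.
    fn subword_address(byte_addr: u64) -> (u64, u32) {
        // sllk in the generated code computes byte_addr * 8; only the
        // low bits matter as a 32-bit rotate amount, so model it as:
        let bit_shift = ((byte_addr & 3) * 8) as u32;
        // nill ..., 65532 (0xfffc) clears the low two address bits:
        let aligned_word = byte_addr & !3;
        (aligned_word, bit_shift)
    }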

Since we cannot emit new control flow during ISLE instruction
selection, these compare-and-swap loops are emitted as a single
meta-instruction to be expanded at emit time.

However, since a large number of different loop variants is
required to implement all the above operations, I've added a
facility that allows the loop bodies to be specified from within
ISLE after all, by creating a vector of MInst structures that
will be emitted as part of the meta-instruction.

There are still restrictions; in particular, instructions that
are part of the loop body may not modify any virtual register.
Even so, this approach looks preferable to doing everything
in emit.rs.
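
Conceptually, the meta-instruction can be pictured like the hedged
Rust sketch below; the type and field names are purely illustrative
and do not match the real MInst definition:

    // Hypothetical shape only.
    enum Inst {
        // ... all the ordinary instructions ...
        AtomicCasLoop {
            // Instructions computing the replacement value; they are
            // emitted verbatim inside the retry loop, which is why they
            // must not define any virtual register.
            body: Vec<Inst>,
            // Operands of the compare-and-swap that closes the loop
            // would be carried here as well.
        },
    }

    // Emit-time expansion, schematically:
    //   0:  <body>                  ; compute the new value
    //       cs/csg old, new, [mem]  ; try to install it
    //       jglh 0b                 ; retry if the word changed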

A few instructions needed in those compare-and-swap loop bodies
were added as well, in particular the RxSBG family and the
LOAD REVERSED in-register byte-swap instructions.
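
In Rust terms, the in-register LOAD REVERSED instructions are plain
byte swaps (shown here only to illustrate their semantics); the
little-endian lowerings bracket the native big-endian loop with
them, as the lrvr/lrvgr pairs in the filetests show:

    // lrvr byte-swaps a 32-bit register, lrvgr a 64-bit one.
    fn lrvr(x: u32) -> u32 { x.swap_bytes() }
    fn lrvgr(x: u64) -> u64 { x.swap_bytes() }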

This patch also adds filetest run tests to verify the semantics
of all operations, in particular the subword and little-endian
variants (these tests are currently only executed on s390x).
Author: Ulrich Weigand
Date: 2022-02-08 22:48:44 +01:00
Committed by: GitHub
Parent: 5cd97c054d
Commit: 9c5c872b3b
21 changed files with 6413 additions and 891 deletions


@@ -0,0 +1,63 @@
test compile
target s390x
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_CAS
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_cas_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_cas.i64 little v2, v0, v1
return v3
}
; check: lrvgr %r2, %r2
; nextln: lrvgr %r3, %r3
; nextln: csg %r2, %r3, 0(%r4)
; nextln: lrvgr %r2, %r2
; nextln: br %r14
function %atomic_cas_i32(i32, i32, i64) -> i32 {
block0(v0: i32, v1: i32, v2: i64):
v3 = atomic_cas.i32 little v2, v0, v1
return v3
}
; check: lrvr %r2, %r2
; nextln: lrvr %r3, %r3
; nextln: cs %r2, %r3, 0(%r4)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_cas_i16(i64, i16, i16, i64) -> i16 {
block0(v0: i64, v1: i16, v2: i16, v3: i64):
v4 = atomic_cas.i16 little v3, v1, v2
return v4
}
; check: sllk %r2, %r5, 3
; nextln: nill %r5, 65532
; nextln: lrvr %r3, %r3
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r5)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r3, 176, 64, 48 ; jglh 1f ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_cas_i8(i64, i8, i8, i64) -> i8 {
block0(v0: i64, v1: i8, v2: i8, v3: i64):
v4 = atomic_cas.i8 little v3, v1, v2
return v4
}
; check: stmg %r14, %r15, 112(%r15)
; nextln: sllk %r2, %r5, 3
; nextln: nill %r5, 65532
; nextln: lcr %r14, %r2
; nextln: l %r0, 0(%r5)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b
; nextln: rll %r2, %r0, 8(%r2)
; nextln: lmg %r14, %r15, 112(%r15)
; nextln: br %r14


@@ -23,3 +23,32 @@ block0(v0: i32, v1: i32, v2: i64):
; check: cs %r2, %r3, 0(%r4)
; nextln: br %r14
function %atomic_cas_i16(i64, i16, i16, i64) -> i16 {
block0(v0: i64, v1: i16, v2: i16, v3: i64):
v4 = atomic_cas.i16 v3, v1, v2
return v4
}
; check: sllk %r2, %r5, 3
; nextln: nill %r5, 65532
; nextln: l %r0, 0(%r5)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 48, 16 ; jglh 1f ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_cas_i8(i64, i8, i8, i64) -> i8 {
block0(v0: i64, v1: i8, v2: i8, v3: i64):
v4 = atomic_cas.i8 v3, v1, v2
return v4
}
; check: stmg %r14, %r15, 112(%r15)
; nextln: sllk %r2, %r5, 3
; nextln: nill %r5, 65532
; nextln: lcr %r14, %r2
; nextln: l %r0, 0(%r5)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b
; nextln: rll %r2, %r0, 8(%r2)
; nextln: lmg %r14, %r15, 112(%r15)
; nextln: br %r14


@@ -0,0 +1,105 @@
test compile
target s390x arch13
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 nand v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: nngrk %r1, %r0, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 nand v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: nnrk %r1, %r0, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little nand v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: lg %r0, 0(%r3)
; nextln: 0: nngrk %r1, %r0, %r2 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little nand v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: nnrk %r1, %r0, %r2 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14


@@ -0,0 +1,620 @@
test compile
target s390x
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (XCHG)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little xchg v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: lg %r0, 0(%r3)
; nextln: 0: csg %r0, %r2, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little xchg v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: cs %r0, %r2, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little xchg v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little xchg v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (ADD)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_add_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little add v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; agr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_add_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little add v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; ar %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_add_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little add v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; ar %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_add_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little add v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SUB)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_sub_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little sub v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; sgr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_sub_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little sub v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; sr %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little sub v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; sr %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little sub v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (AND)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_and_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little and v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: lang %r2, %r2, 0(%r3)
; nextln: lrvgr %r2, %r2
; nextln: br %r14
function %atomic_rmw_and_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little and v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: lan %r2, %r2, 0(%r3)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_and_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little and v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_and_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little and v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (OR)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_or_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little or v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: laog %r2, %r2, 0(%r3)
; nextln: lrvgr %r2, %r2
; nextln: br %r14
function %atomic_rmw_or_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little or v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: lao %r2, %r2, 0(%r3)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_or_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little or v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rosbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_or_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little or v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (XOR)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_xor_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little xor v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: laxg %r2, %r2, 0(%r3)
; nextln: lrvgr %r2, %r2
; nextln: br %r14
function %atomic_rmw_xor_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little xor v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: lax %r2, %r2, 0(%r3)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little xor v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little xor v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (NAND)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little nand v1, v2
return v3
}
; check: lrvgr %r2, %r4
; nextln: lg %r0, 0(%r3)
; nextln: 0: ngrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little nand v1, v2
return v3
}
; check: lrvr %r2, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: nrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lrvr %r4, %r4
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SMIN)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little smin v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little smin v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little smin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little smin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SMAX)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little smax v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little smax v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little smax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little smax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (UMIN)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little umin v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little umin v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little umin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little umin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (UMAX)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 little umax v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 little umax v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lrvr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 little umax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 0(%r2)
; nextln: lrvr %r2, %r2
; nextln: br %r14
function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 little umax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14


@@ -1,6 +1,59 @@
test compile
target s390x
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (XCHG)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 xchg v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 xchg v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 xchg v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 xchg v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (ADD)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -23,6 +76,35 @@ block0(v0: i64, v1: i32):
; check: laa %r2, %r3, 0(%r2)
; nextln: br %r14
function %atomic_rmw_add_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 add v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_add_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 add v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SUB)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -47,6 +129,35 @@ block0(v0: i64, v1: i32):
; nextln: laa %r2, %r3, 0(%r2)
; nextln: br %r14
function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 sub v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 sub v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (AND)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -69,6 +180,33 @@ block0(v0: i64, v1: i32):
; check: lan %r2, %r3, 0(%r2)
; nextln: br %r14
function %atomic_rmw_and_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 and v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_and_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 and v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (OR)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -91,6 +229,33 @@ block0(v0: i64, v1: i32):
; check: lao %r2, %r3, 0(%r2)
; nextln: br %r14
function %atomic_rmw_or_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 or v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_or_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 or v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (XOR)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -112,3 +277,304 @@ block0(v0: i64, v1: i32):
; check: lax %r2, %r3, 0(%r2)
; nextln: br %r14
function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 xor v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 xor v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (NAND)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 nand v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: ngrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 nand v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: nrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 nand v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SMIN)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 smin v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: cgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 smin v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: cr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 smin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 smin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (SMAX)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 smax v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: cgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 smax v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: cr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 smax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 smax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (UMIN)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 umin v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: clgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 umin v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: clr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 umin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 umin v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (UMAX)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = atomic_rmw.i64 umax v1, v2
return v3
}
; check: lg %r0, 0(%r3)
; nextln: 0: clgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lgr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
v3 = atomic_rmw.i32 umax v1, v2
return v3
}
; check: l %r0, 0(%r3)
; nextln: 0: clr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; nextln: lr %r2, %r0
; nextln: br %r14
function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
v3 = atomic_rmw.i16 umax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 16
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 16(%r2)
; nextln: br %r14
function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
v3 = atomic_rmw.i8 umax v1, v2
return v3
}
; check: sllk %r2, %r3, 3
; nextln: nill %r3, 65532
; nextln: sllk %r4, %r4, 24
; nextln: lcr %r5, %r2
; nextln: l %r0, 0(%r3)
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; nextln: rll %r2, %r0, 8(%r2)
; nextln: br %r14


@@ -0,0 +1,40 @@
test run
target s390x
; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly
function %atomic_cas_i64(i64, i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64, v2: i64):
v3 = stack_addr.i64 ss0
store.i64 little v0, v3
v4 = atomic_cas.i64 little v3, v1, v2
v5 = load.i64 little v3
return v5
}
; run: %atomic_cas_i64(0, 0, 2) == 2
; run: %atomic_cas_i64(1, 0, 2) == 1
; run: %atomic_cas_i64(0, 1, 2) == 0
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_cas_i32(i32, i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32, v2: i32):
v3 = stack_addr.i32 ss0
store.i32 little v0, v3
v4 = atomic_cas.i32 little v3, v1, v2
v5 = load.i32 little v3
return v5
}
; run: %atomic_cas_i32(0, 0, 2) == 2
; run: %atomic_cas_i32(1, 0, 2) == 1
; run: %atomic_cas_i32(0, 1, 2) == 0
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE


@@ -0,0 +1,86 @@
test run
target s390x
; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly
function %atomic_cas_big_i16(i32, i64, i16, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16, v3: i16):
v4 = stack_addr.i64 ss0
store.i32 big v0, v4
v5 = iadd.i64 v4, v1
v6 = atomic_cas.i16 big v5, v2, v3
v7 = load.i32 big v4
return v7
}
; run: %atomic_cas_big_i16(0x12345678, 0, 0x1234, 0xabcd) == 0xabcd5678
; run: %atomic_cas_big_i16(0x12345678, 0, 0x4321, 0xabcd) == 0x12345678
; run: %atomic_cas_big_i16(0x12345678, 2, 0x5678, 0xabcd) == 0x1234abcd
; run: %atomic_cas_big_i16(0x12345678, 2, 0x8765, 0xabcd) == 0x12345678
function %atomic_cas_little_i16(i32, i64, i16, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16, v3: i16):
v4 = stack_addr.i64 ss0
store.i32 little v0, v4
v5 = iadd.i64 v4, v1
v6 = atomic_cas.i16 little v5, v2, v3
v7 = load.i32 little v4
return v7
}
; run: %atomic_cas_little_i16(0x12345678, 2, 0x1234, 0xabcd) == 0xabcd5678
; run: %atomic_cas_little_i16(0x12345678, 2, 0x4321, 0xabcd) == 0x12345678
; run: %atomic_cas_little_i16(0x12345678, 0, 0x5678, 0xabcd) == 0x1234abcd
; run: %atomic_cas_little_i16(0x12345678, 0, 0x8765, 0xabcd) == 0x12345678
function %atomic_cas_big_i8(i32, i64, i8, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8, v3: i8):
v4 = stack_addr.i64 ss0
store.i32 big v0, v4
v5 = iadd.i64 v4, v1
v6 = atomic_cas.i8 big v5, v2, v3
v7 = load.i32 big v4
return v7
}
; run: %atomic_cas_big_i8(0x12345678, 0, 0x12, 0xab) == 0xab345678
; run: %atomic_cas_big_i8(0x12345678, 0, 0x21, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 1, 0x34, 0xab) == 0x12ab5678
; run: %atomic_cas_big_i8(0x12345678, 1, 0x43, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 2, 0x56, 0xab) == 0x1234ab78
; run: %atomic_cas_big_i8(0x12345678, 2, 0x65, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 3, 0x78, 0xab) == 0x123456ab
; run: %atomic_cas_big_i8(0x12345678, 3, 0x87, 0xab) == 0x12345678
function %atomic_cas_little_i8(i32, i64, i8, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8, v3: i8):
v4 = stack_addr.i64 ss0
store.i32 little v0, v4
v5 = iadd.i64 v4, v1
v6 = atomic_cas.i8 little v5, v2, v3
v7 = load.i32 little v4
return v7
}
; run: %atomic_cas_little_i8(0x12345678, 3, 0x12, 0xab) == 0xab345678
; run: %atomic_cas_little_i8(0x12345678, 3, 0x21, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 2, 0x34, 0xab) == 0x12ab5678
; run: %atomic_cas_little_i8(0x12345678, 2, 0x43, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 1, 0x56, 0xab) == 0x1234ab78
; run: %atomic_cas_little_i8(0x12345678, 1, 0x65, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 0, 0x78, 0xab) == 0x123456ab
; run: %atomic_cas_little_i8(0x12345678, 0, 0x87, 0xab) == 0x12345678


@@ -0,0 +1,43 @@
test run
target aarch64
target aarch64 has_lse
target x86_64
target s390x
; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly
function %atomic_cas_i64(i64, i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64, v2: i64):
stack_store.i64 v0, ss0
v3 = stack_addr.i64 ss0
v4 = atomic_cas.i64 v3, v1, v2
v5 = stack_load.i64 ss0
return v5
}
; run: %atomic_cas_i64(0, 0, 2) == 2
; run: %atomic_cas_i64(1, 0, 2) == 1
; run: %atomic_cas_i64(0, 1, 2) == 0
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_cas_i32(i32, i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32, v2: i32):
stack_store.i32 v0, ss0
v3 = stack_addr.i32 ss0
v4 = atomic_cas.i32 v3, v1, v2
v5 = stack_load.i32 ss0
return v5
}
; run: %atomic_cas_i32(0, 0, 2) == 2
; run: %atomic_cas_i32(1, 0, 2) == 1
; run: %atomic_cas_i32(0, 1, 2) == 0
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE


@@ -1,238 +0,0 @@
test run
target aarch64
target aarch64 has_lse
target x86_64
; TODO: Merge this with atomic-rmw.clif when s390x supports it
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 nand v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_nand_i64(0, 0) == -1
; run: %atomic_rmw_nand_i64(1, 0) == -1
; run: %atomic_rmw_nand_i64(0, 1) == -1
; run: %atomic_rmw_nand_i64(1, 1) == -2
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 nand v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_nand_i32(0, 0) == -1
; run: %atomic_rmw_nand_i32(1, 0) == -1
; run: %atomic_rmw_nand_i32(0, 1) == -1
; run: %atomic_rmw_nand_i32(1, 1) == -2
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umin v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umin_i64(0, 0) == 0
; run: %atomic_rmw_umin_i64(1, 0) == 0
; run: %atomic_rmw_umin_i64(0, 1) == 0
; run: %atomic_rmw_umin_i64(1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, -3) == -3
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umin v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umin_i32(0, 0) == 0
; run: %atomic_rmw_umin_i32(1, 0) == 0
; run: %atomic_rmw_umin_i32(0, 1) == 0
; run: %atomic_rmw_umin_i32(1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, -3) == -3
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umax v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umax_i64(0, 0) == 0
; run: %atomic_rmw_umax_i64(1, 0) == 1
; run: %atomic_rmw_umax_i64(0, 1) == 1
; run: %atomic_rmw_umax_i64(1, 1) == 1
; run: %atomic_rmw_umax_i64(-1, 1) == -1
; run: %atomic_rmw_umax_i64(-1, -3) == -1
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umax v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umax_i32(0, 0) == 0
; run: %atomic_rmw_umax_i32(1, 0) == 1
; run: %atomic_rmw_umax_i32(0, 1) == 1
; run: %atomic_rmw_umax_i32(1, 1) == 1
; run: %atomic_rmw_umax_i32(-1, 1) == -1
; run: %atomic_rmw_umax_i32(-1, -3) == -1
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smin v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smin_i64(0, 0) == 0
; run: %atomic_rmw_smin_i64(1, 0) == 0
; run: %atomic_rmw_smin_i64(0, 1) == 0
; run: %atomic_rmw_smin_i64(1, 1) == 1
; run: %atomic_rmw_smin_i64(-1, 1) == -1
; run: %atomic_rmw_smin_i64(-1, -3) == -3
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smin v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smin_i32(0, 0) == 0
; run: %atomic_rmw_smin_i32(1, 0) == 0
; run: %atomic_rmw_smin_i32(0, 1) == 0
; run: %atomic_rmw_smin_i32(1, 1) == 1
; run: %atomic_rmw_smin_i32(-1, -1) == -1
; run: %atomic_rmw_smin_i32(-1, -3) == -3
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smax v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smax_i64(0, 0) == 0
; run: %atomic_rmw_smax_i64(1, 0) == 1
; run: %atomic_rmw_smax_i64(0, 1) == 1
; run: %atomic_rmw_smax_i64(1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, -3) == -1
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smax v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smax_i32(0, 0) == 0
; run: %atomic_rmw_smax_i32(1, 0) == 1
; run: %atomic_rmw_smax_i32(0, 1) == 1
; run: %atomic_rmw_smax_i32(1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, -3) == -1
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 xchg v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_xchg_i64(0, 0) == 0
; run: %atomic_rmw_xchg_i64(1, 0) == 0
; run: %atomic_rmw_xchg_i64(0, 1) == 1
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 xchg v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_xchg_i32(0, 0) == 0
; run: %atomic_rmw_xchg_i32(1, 0) == 0
; run: %atomic_rmw_xchg_i32(0, 1) == 1
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE


@@ -0,0 +1,429 @@
test run
target s390x
; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly
function %atomic_rmw_add_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little add v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_add_i64(0, 0) == 0
; run: %atomic_rmw_add_i64(1, 0) == 1
; run: %atomic_rmw_add_i64(0, 1) == 1
; run: %atomic_rmw_add_i64(1, 1) == 2
; run: %atomic_rmw_add_i64(0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111) == 0xDECAFFFF_DECAFFFF
function %atomic_rmw_add_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little add v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_add_i32(0, 0) == 0
; run: %atomic_rmw_add_i32(1, 0) == 1
; run: %atomic_rmw_add_i32(0, 1) == 1
; run: %atomic_rmw_add_i32(1, 1) == 2
; run: %atomic_rmw_add_i32(0xC0FFEEEE, 0x1DCB1111) == 0xDECAFFFF
function %atomic_rmw_sub_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little sub v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_sub_i64(0, 0) == 0
; run: %atomic_rmw_sub_i64(1, 0) == 1
; run: %atomic_rmw_sub_i64(0, 1) == -1
; run: %atomic_rmw_sub_i64(1, 1) == 0
; run: %atomic_rmw_sub_i64(0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111) == 0xC0FFEEEE_C0FFEEEE
function %atomic_rmw_sub_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little sub v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_sub_i32(0, 0) == 0
; run: %atomic_rmw_sub_i32(1, 0) == 1
; run: %atomic_rmw_sub_i32(0, 1) == -1
; run: %atomic_rmw_sub_i32(1, 1) == 0
; run: %atomic_rmw_sub_i32(0xDECAFFFF, 0x1DCB1111) == 0xC0FFEEEE
function %atomic_rmw_and_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little and v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_and_i64(0, 0) == 0
; run: %atomic_rmw_and_i64(1, 0) == 0
; run: %atomic_rmw_and_i64(0, 1) == 0
; run: %atomic_rmw_and_i64(1, 1) == 1
; run: %atomic_rmw_and_i64(0xF1FFFEFE_FEEEFFFF, 0xCEFFEFEF_DFDBFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_and_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little and v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_and_i32(0, 0) == 0
; run: %atomic_rmw_and_i32(1, 0) == 0
; run: %atomic_rmw_and_i32(0, 1) == 0
; run: %atomic_rmw_and_i32(1, 1) == 1
; run: %atomic_rmw_and_i32(0xF1FFFEFE, 0xCEFFEFEF) == 0xC0FFEEEE
function %atomic_rmw_or_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little or v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_or_i64(0, 0) == 0
; run: %atomic_rmw_or_i64(1, 0) == 1
; run: %atomic_rmw_or_i64(0, 1) == 1
; run: %atomic_rmw_or_i64(1, 1) == 1
; run: %atomic_rmw_or_i64(0x80AAAAAA_8A8AAAAA, 0x40554444_54405555) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_or_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little or v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_or_i32(0, 0) == 0
; run: %atomic_rmw_or_i32(1, 0) == 1
; run: %atomic_rmw_or_i32(0, 1) == 1
; run: %atomic_rmw_or_i32(1, 1) == 1
; run: %atomic_rmw_or_i32(0x80AAAAAA, 0x40554444) == 0xC0FFEEEE
function %atomic_rmw_xor_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little xor v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_xor_i64(0, 0) == 0
; run: %atomic_rmw_xor_i64(1, 0) == 1
; run: %atomic_rmw_xor_i64(0, 1) == 1
; run: %atomic_rmw_xor_i64(1, 1) == 0
; run: %atomic_rmw_xor_i64(0x8FA50A64_9440A07D, 0x4F5AE48A_4A8A5F82) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_xor_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little xor v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_xor_i32(0, 0) == 0
; run: %atomic_rmw_xor_i32(1, 0) == 1
; run: %atomic_rmw_xor_i32(0, 1) == 1
; run: %atomic_rmw_xor_i32(1, 1) == 0
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little nand v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_nand_i64(0, 0) == -1
; run: %atomic_rmw_nand_i64(1, 0) == -1
; run: %atomic_rmw_nand_i64(0, 1) == -1
; run: %atomic_rmw_nand_i64(1, 1) == -2
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little nand v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_nand_i32(0, 0) == -1
; run: %atomic_rmw_nand_i32(1, 0) == -1
; run: %atomic_rmw_nand_i32(0, 1) == -1
; run: %atomic_rmw_nand_i32(1, 1) == -2
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little umin v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_umin_i64(0, 0) == 0
; run: %atomic_rmw_umin_i64(1, 0) == 0
; run: %atomic_rmw_umin_i64(0, 1) == 0
; run: %atomic_rmw_umin_i64(1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, -3) == -3
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little umin v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_umin_i32(0, 0) == 0
; run: %atomic_rmw_umin_i32(1, 0) == 0
; run: %atomic_rmw_umin_i32(0, 1) == 0
; run: %atomic_rmw_umin_i32(1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, -3) == -3
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little umax v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_umax_i64(0, 0) == 0
; run: %atomic_rmw_umax_i64(1, 0) == 1
; run: %atomic_rmw_umax_i64(0, 1) == 1
; run: %atomic_rmw_umax_i64(1, 1) == 1
; run: %atomic_rmw_umax_i64(-1, 1) == -1
; run: %atomic_rmw_umax_i64(-1, -3) == -1
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little umax v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_umax_i32(0, 0) == 0
; run: %atomic_rmw_umax_i32(1, 0) == 1
; run: %atomic_rmw_umax_i32(0, 1) == 1
; run: %atomic_rmw_umax_i32(1, 1) == 1
; run: %atomic_rmw_umax_i32(-1, 1) == -1
; run: %atomic_rmw_umax_i32(-1, -3) == -1
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little smin v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_smin_i64(0, 0) == 0
; run: %atomic_rmw_smin_i64(1, 0) == 0
; run: %atomic_rmw_smin_i64(0, 1) == 0
; run: %atomic_rmw_smin_i64(1, 1) == 1
; run: %atomic_rmw_smin_i64(-1, 1) == -1
; run: %atomic_rmw_smin_i64(-1, -3) == -3
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little smin v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_smin_i32(0, 0) == 0
; run: %atomic_rmw_smin_i32(1, 0) == 0
; run: %atomic_rmw_smin_i32(0, 1) == 0
; run: %atomic_rmw_smin_i32(1, 1) == 1
; run: %atomic_rmw_smin_i32(-1, 1) == -1
; run: %atomic_rmw_smin_i32(-1, -3) == -3
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little smax v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_smax_i64(0, 0) == 0
; run: %atomic_rmw_smax_i64(1, 0) == 1
; run: %atomic_rmw_smax_i64(0, 1) == 1
; run: %atomic_rmw_smax_i64(1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, -3) == -1
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little smax v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_smax_i32(0, 0) == 0
; run: %atomic_rmw_smax_i32(1, 0) == 1
; run: %atomic_rmw_smax_i32(0, 1) == 1
; run: %atomic_rmw_smax_i32(1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, -3) == -1
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss0
store.i64 little v0, v2
v3 = atomic_rmw.i64 little xchg v2, v1
v4 = load.i64 little v2
return v4
}
; run: %atomic_rmw_xchg_i64(0, 0) == 0
; run: %atomic_rmw_xchg_i64(1, 0) == 0
; run: %atomic_rmw_xchg_i64(0, 1) == 1
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
v2 = stack_addr.i32 ss0
store.i32 little v0, v2
v3 = atomic_rmw.i32 little xchg v2, v1
v4 = load.i32 little v2
return v4
}
; run: %atomic_rmw_xchg_i32(0, 0) == 0
; run: %atomic_rmw_xchg_i32(1, 0) == 0
; run: %atomic_rmw_xchg_i32(0, 1) == 1
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE


@@ -0,0 +1,907 @@
test run
target s390x
; We can't test the atomicity of these instructions here, but we can
; test whether they perform their operation correctly
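;
; Each subword test below stores a 32-bit word into a stack slot, applies the atomic
; operation to an 8-bit or 16-bit lane at the given byte offset within that word, and
; then reloads the whole word, so the expected results show which lane was modified
; and that the neighbouring bytes were left untouched.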
function %atomic_rmw_add_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big add v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0x1111) == 0x23455678
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0xffff) == 0x12335678
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0x1111) == 0x12346789
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0xffff) == 0x12345677
function %atomic_rmw_add_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little add v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0x1111) == 0x23455678
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0xffff) == 0x12335678
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0x1111) == 0x12346789
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0xffff) == 0x12345677
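; (Offsets above: a big-endian store of 0x12345678 puts byte 0x12 at offset 0, so the
;  i16 lane at offset 0 is 0x1234 and the lane at offset 2 is 0x5678; a little-endian
;  store puts byte 0x78 at offset 0, so the lanes swap. The little-endian expectations
;  therefore mirror the big-endian ones with the byte offsets exchanged.)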
function %atomic_rmw_add_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big add v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0x11) == 0x23345678
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0xff) == 0x11345678
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0x11) == 0x12455678
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0xff) == 0x12335678
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0x11) == 0x12346778
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0xff) == 0x12345578
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0x11) == 0x12345689
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0xff) == 0x12345677
function %atomic_rmw_add_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little add v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0x11) == 0x23345678
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0xff) == 0x11345678
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0x11) == 0x12455678
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0xff) == 0x12335678
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0x11) == 0x12346778
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0xff) == 0x12345578
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0x11) == 0x12345689
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0xff) == 0x12345677
function %atomic_rmw_sub_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big sub v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0x1111) == 0x01235678
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0xffff) == 0x12355678
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0x1111) == 0x12344567
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0xffff) == 0x12345679
function %atomic_rmw_sub_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little sub v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0x1111) == 0x01235678
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0xffff) == 0x12355678
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0x1111) == 0x12344567
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0xffff) == 0x12345679
function %atomic_rmw_sub_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big sub v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0x11) == 0x01345678
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0xff) == 0x13345678
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0x11) == 0x12235678
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0xff) == 0x12355678
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0x11) == 0x12344578
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0xff) == 0x12345778
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0x11) == 0x12345667
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0xff) == 0x12345679
function %atomic_rmw_sub_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little sub v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0x11) == 0x01345678
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0xff) == 0x13345678
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0x11) == 0x12235678
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0xff) == 0x12355678
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0x11) == 0x12344578
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0xff) == 0x12345778
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0x11) == 0x12345667
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0xff) == 0x12345679
function %atomic_rmw_and_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big and v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0xf000) == 0x10005678
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0x000f) == 0x00045678
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0xf000) == 0x12345000
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0x000f) == 0x12340008
function %atomic_rmw_and_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little and v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0xf000) == 0x10005678
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0x000f) == 0x00045678
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0xf000) == 0x12345000
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0x000f) == 0x12340008
function %atomic_rmw_and_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big and v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0xf0) == 0x10345678
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0x0f) == 0x02345678
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0xf0) == 0x12305678
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0x0f) == 0x12045678
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0xf0) == 0x12345078
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0x0f) == 0x12340678
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0xf0) == 0x12345670
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0x0f) == 0x12345608
function %atomic_rmw_and_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little and v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0xf0) == 0x10345678
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0x0f) == 0x02345678
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0xf0) == 0x12305678
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0x0f) == 0x12045678
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0xf0) == 0x12345078
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0x0f) == 0x12340678
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0xf0) == 0x12345670
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0x0f) == 0x12345608
function %atomic_rmw_or_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big or v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0xf000) == 0xf2345678
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0x000f) == 0x123f5678
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0xf000) == 0x1234f678
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0x000f) == 0x1234567f
function %atomic_rmw_or_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little or v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0xf000) == 0xf2345678
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0x000f) == 0x123f5678
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0xf000) == 0x1234f678
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0x000f) == 0x1234567f
function %atomic_rmw_or_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big or v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0xf0) == 0xf2345678
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0x0f) == 0x1f345678
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0xf0) == 0x12f45678
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0x0f) == 0x123f5678
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0xf0) == 0x1234f678
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0x0f) == 0x12345f78
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0xf0) == 0x123456f8
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0x0f) == 0x1234567f
function %atomic_rmw_or_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little or v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0xf0) == 0xf2345678
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0x0f) == 0x1f345678
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0xf0) == 0x12f45678
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0x0f) == 0x123f5678
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0xf0) == 0x1234f678
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0x0f) == 0x12345f78
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0xf0) == 0x123456f8
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0x0f) == 0x1234567f
function %atomic_rmw_xor_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big xor v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0xf000) == 0xe2345678
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0x000f) == 0x123b5678
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0xf000) == 0x1234a678
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0x000f) == 0x12345677
function %atomic_rmw_xor_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little xor v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0xf000) == 0xe2345678
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0x000f) == 0x123b5678
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0xf000) == 0x1234a678
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0x000f) == 0x12345677
function %atomic_rmw_xor_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big xor v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0xf0) == 0xe2345678
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0x0f) == 0x1d345678
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0xf0) == 0x12c45678
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0x0f) == 0x123b5678
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0xf0) == 0x1234a678
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0x0f) == 0x12345978
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0xf0) == 0x12345688
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0x0f) == 0x12345677
function %atomic_rmw_xor_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little xor v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0xf0) == 0xe2345678
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0x0f) == 0x1d345678
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0xf0) == 0x12c45678
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0x0f) == 0x123b5678
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0xf0) == 0x1234a678
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0x0f) == 0x12345978
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0xf0) == 0x12345688
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0x0f) == 0x12345677
function %atomic_rmw_nand_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big nand v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0xf000) == 0xefff5678
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0x000f) == 0xfffb5678
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0xf000) == 0x1234afff
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0x000f) == 0x1234fff7
function %atomic_rmw_nand_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little nand v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0xf000) == 0xefff5678
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0x000f) == 0xfffb5678
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0xf000) == 0x1234afff
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0x000f) == 0x1234fff7
function %atomic_rmw_nand_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big nand v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0xf0) == 0xef345678
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0x0f) == 0xfd345678
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0xf0) == 0x12cf5678
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0x0f) == 0x12fb5678
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0xf0) == 0x1234af78
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0x0f) == 0x1234f978
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0xf0) == 0x1234568f
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0x0f) == 0x123456f7
function %atomic_rmw_nand_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little nand v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0xf0) == 0xef345678
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0x0f) == 0xfd345678
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0xf0) == 0x12cf5678
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0x0f) == 0x12fb5678
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0xf0) == 0x1234af78
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0x0f) == 0x1234f978
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0xf0) == 0x1234568f
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0x0f) == 0x123456f7
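; (For example, in the first nand_big_i8 case the byte at offset 0 is 0x12, and
;  nand(0x12, 0xf0) == ~(0x12 & 0xf0) == ~0x10 == 0xef, giving 0xef345678 with the
;  other three bytes unchanged.)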
function %atomic_rmw_umin_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big umin v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0xffff) == 0x12345678
function %atomic_rmw_umin_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little umin v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0xffff) == 0x12345678
function %atomic_rmw_umin_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big umin v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0xff) == 0x12345678
function %atomic_rmw_umin_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little umin v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0xff) == 0x12345678
function %atomic_rmw_umax_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big umax v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
function %atomic_rmw_umax_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little umax v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
function %atomic_rmw_umax_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big umax v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0xff) == 0x123456ff
function %atomic_rmw_umax_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little umax v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0xff) == 0x123456ff
function %atomic_rmw_smin_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big smin v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
function %atomic_rmw_smin_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little smin v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
function %atomic_rmw_smin_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big smin v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0xff) == 0x123456ff
function %atomic_rmw_smin_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little smin v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0xff) == 0x123456ff
function %atomic_rmw_smax_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big smax v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0x7fff) == 0x12347fff
function %atomic_rmw_smax_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little smax v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0x7fff) == 0x12347fff
function %atomic_rmw_smax_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big smax v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0x7f) == 0x1234567f
function %atomic_rmw_smax_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little smax v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0x7f) == 0x1234567f
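; (In the smin/smax subword tests above, 0xff and 0xffff are -1 when read as signed
;  lanes, so smin stores them while smax keeps the original positive value; 0x7f and
;  0x7fff are the signed maxima, so smax stores them instead.)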
function %atomic_rmw_xchg_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big xchg v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
function %atomic_rmw_xchg_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little xchg v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
function %atomic_rmw_xchg_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big xchg v4, v2
v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0xff) == 0x123456ff
function %atomic_rmw_xchg_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3
v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little xchg v4, v2
v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0xff) == 0x123456ff


@@ -196,3 +196,237 @@ block0(v0: i32, v1: i32):
; run: %atomic_rmw_xor_i32(0, 1) == 1
; run: %atomic_rmw_xor_i32(1, 1) == 0
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 nand v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_nand_i64(0, 0) == -1
; run: %atomic_rmw_nand_i64(1, 0) == -1
; run: %atomic_rmw_nand_i64(0, 1) == -1
; run: %atomic_rmw_nand_i64(1, 1) == -2
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 nand v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_nand_i32(0, 0) == -1
; run: %atomic_rmw_nand_i32(1, 0) == -1
; run: %atomic_rmw_nand_i32(0, 1) == -1
; run: %atomic_rmw_nand_i32(1, 1) == -2
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umin v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umin_i64(0, 0) == 0
; run: %atomic_rmw_umin_i64(1, 0) == 0
; run: %atomic_rmw_umin_i64(0, 1) == 0
; run: %atomic_rmw_umin_i64(1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, -3) == -3
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umin v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umin_i32(0, 0) == 0
; run: %atomic_rmw_umin_i32(1, 0) == 0
; run: %atomic_rmw_umin_i32(0, 1) == 0
; run: %atomic_rmw_umin_i32(1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, -3) == -3
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umax v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umax_i64(0, 0) == 0
; run: %atomic_rmw_umax_i64(1, 0) == 1
; run: %atomic_rmw_umax_i64(0, 1) == 1
; run: %atomic_rmw_umax_i64(1, 1) == 1
; run: %atomic_rmw_umax_i64(-1, 1) == -1
; run: %atomic_rmw_umax_i64(-1, -3) == -1
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umax v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umax_i32(0, 0) == 0
; run: %atomic_rmw_umax_i32(1, 0) == 1
; run: %atomic_rmw_umax_i32(0, 1) == 1
; run: %atomic_rmw_umax_i32(1, 1) == 1
; run: %atomic_rmw_umax_i32(-1, 1) == -1
; run: %atomic_rmw_umax_i32(-1, -3) == -1
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smin v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smin_i64(0, 0) == 0
; run: %atomic_rmw_smin_i64(1, 0) == 0
; run: %atomic_rmw_smin_i64(0, 1) == 0
; run: %atomic_rmw_smin_i64(1, 1) == 1
; run: %atomic_rmw_smin_i64(-1, 1) == -1
; run: %atomic_rmw_smin_i64(-1, -3) == -3
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smin v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smin_i32(0, 0) == 0
; run: %atomic_rmw_smin_i32(1, 0) == 0
; run: %atomic_rmw_smin_i32(0, 1) == 0
; run: %atomic_rmw_smin_i32(1, 1) == 1
; run: %atomic_rmw_smin_i32(-1, 1) == -1
; run: %atomic_rmw_smin_i32(-1, -3) == -3
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smax v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smax_i64(0, 0) == 0
; run: %atomic_rmw_smax_i64(1, 0) == 1
; run: %atomic_rmw_smax_i64(0, 1) == 1
; run: %atomic_rmw_smax_i64(1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, -3) == -1
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smax v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smax_i32(0, 0) == 0
; run: %atomic_rmw_smax_i32(1, 0) == 1
; run: %atomic_rmw_smax_i32(0, 1) == 1
; run: %atomic_rmw_smax_i32(1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, -3) == -1
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8
block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0
v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 xchg v2, v1
v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_xchg_i64(0, 0) == 0
; run: %atomic_rmw_xchg_i64(1, 0) == 0
; run: %atomic_rmw_xchg_i64(0, 1) == 1
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4
block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0
v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 xchg v2, v1
v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_xchg_i32(0, 0) == 0
; run: %atomic_rmw_xchg_i32(1, 0) == 0
; run: %atomic_rmw_xchg_i32(0, 1) == 1
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE