s390x: Add support for all remaining atomic operations (#3746)
This adds support for all atomic operations that were still unimplemented in the s390x back end:

- atomic_rmw operations xchg, nand, smin, smax, umin, umax
- $I8 and $I16 versions of atomic_rmw and atomic_cas
- little-endian versions of atomic_rmw and atomic_cas

All of these have to be implemented via a compare-and-swap loop, and for the $I8 and $I16 versions the actual atomic instruction needs to operate on the surrounding aligned 32-bit word.

Since we cannot emit new control flow during ISLE instruction selection, these compare-and-swap loops are emitted as a single meta-instruction to be expanded at emit time. However, since a large number of different versions of the loop are required to implement all the above operations, I've implemented a facility that allows specifying the loop bodies from within ISLE after all, by creating a vector of MInst structures that will be emitted as part of the meta-instruction. There are still restrictions; in particular, instructions that are part of the loop body may not modify any virtual register. But even so, this approach looks preferable to doing everything in emit.rs.

A few instructions needed in those compare-and-swap loop bodies were added as well, in particular the RxSBG family of instructions and the LOAD REVERSED in-register byte-swap instructions.

This patch also adds filetest runtests to verify the semantics of all operations, in particular the subword and little-endian variants (those are currently only executed on s390x).
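For readers unfamiliar with the subword trick, the sketch below illustrates the idea in plain Rust rather than in the back end itself: an 8-bit atomic read-modify-write is carried out by a compare-and-swap loop on the enclosing aligned 32-bit word. The function name, the choice of `add`, and the use of `std::sync::atomic` are illustrative assumptions only; the actual lowering emits the equivalent loop as a meta-instruction at emit time, as described above.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Illustrative sketch (not the back end's actual lowering): perform an
/// 8-bit atomic "add" by running a compare-and-swap loop on the
/// surrounding aligned 32-bit word. Big-endian byte indexing is assumed,
/// purely for illustration.
fn atomic_add_u8_via_word_cas(word: &AtomicU32, byte_index: usize, operand: u8) -> u8 {
    assert!(byte_index < 4);
    // Shift selecting the addressed byte within the aligned word
    // (big-endian: byte 0 is the most significant byte).
    let shift = (3 - byte_index) * 8;
    let mask = 0xffu32 << shift;
    let mut old_word = word.load(Ordering::Relaxed);
    loop {
        let old_byte = ((old_word & mask) >> shift) as u8;
        let new_byte = old_byte.wrapping_add(operand);
        let new_word = (old_word & !mask) | ((new_byte as u32) << shift);
        // On CAS failure, retry with the freshly observed word.
        match word.compare_exchange(old_word, new_word, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => return old_byte,
            Err(observed) => old_word = observed,
        }
    }
}

fn main() {
    let word = AtomicU32::new(0x12345678);
    // Add 0x11 to the byte at big-endian index 1 (0x34 -> 0x45),
    // mirroring the `; run:` expectations in atomic-rmw-subword.clif.
    let old = atomic_add_u8_via_word_cas(&word, 1, 0x11);
    assert_eq!(old, 0x34);
    assert_eq!(word.load(Ordering::Relaxed), 0x12455678);
}
```

The little-endian and 16-bit variants only change how the byte offset maps to the shift amount and how many bits the mask covers.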
@@ -0,0 +1,40 @@
test run
target s390x

; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly

function %atomic_cas_i64(i64, i64, i64) -> i64 {
    ss0 = explicit_slot 8

block0(v0: i64, v1: i64, v2: i64):
    v3 = stack_addr.i64 ss0
    store.i64 little v0, v3

    v4 = atomic_cas.i64 little v3, v1, v2

    v5 = load.i64 little v3
    return v5
}
; run: %atomic_cas_i64(0, 0, 2) == 2
; run: %atomic_cas_i64(1, 0, 2) == 1
; run: %atomic_cas_i64(0, 1, 2) == 0
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF

function %atomic_cas_i32(i32, i32, i32) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i32, v2: i32):
    v3 = stack_addr.i32 ss0
    store.i32 little v0, v3

    v4 = atomic_cas.i32 little v3, v1, v2

    v5 = load.i32 little v3
    return v5
}
; run: %atomic_cas_i32(0, 0, 2) == 2
; run: %atomic_cas_i32(1, 0, 2) == 1
; run: %atomic_cas_i32(0, 1, 2) == 0
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE
@@ -0,0 +1,86 @@
test run
target s390x

; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly

function %atomic_cas_big_i16(i32, i64, i16, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16, v3: i16):
    v4 = stack_addr.i64 ss0
    store.i32 big v0, v4

    v5 = iadd.i64 v4, v1
    v6 = atomic_cas.i16 big v5, v2, v3

    v7 = load.i32 big v4
    return v7
}
; run: %atomic_cas_big_i16(0x12345678, 0, 0x1234, 0xabcd) == 0xabcd5678
; run: %atomic_cas_big_i16(0x12345678, 0, 0x4321, 0xabcd) == 0x12345678
; run: %atomic_cas_big_i16(0x12345678, 2, 0x5678, 0xabcd) == 0x1234abcd
; run: %atomic_cas_big_i16(0x12345678, 2, 0x8765, 0xabcd) == 0x12345678

function %atomic_cas_little_i16(i32, i64, i16, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16, v3: i16):
    v4 = stack_addr.i64 ss0
    store.i32 little v0, v4

    v5 = iadd.i64 v4, v1
    v6 = atomic_cas.i16 little v5, v2, v3

    v7 = load.i32 little v4
    return v7
}
; run: %atomic_cas_little_i16(0x12345678, 2, 0x1234, 0xabcd) == 0xabcd5678
; run: %atomic_cas_little_i16(0x12345678, 2, 0x4321, 0xabcd) == 0x12345678
; run: %atomic_cas_little_i16(0x12345678, 0, 0x5678, 0xabcd) == 0x1234abcd
; run: %atomic_cas_little_i16(0x12345678, 0, 0x8765, 0xabcd) == 0x12345678

function %atomic_cas_big_i8(i32, i64, i8, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8, v3: i8):
    v4 = stack_addr.i64 ss0
    store.i32 big v0, v4

    v5 = iadd.i64 v4, v1
    v6 = atomic_cas.i8 big v5, v2, v3

    v7 = load.i32 big v4
    return v7
}
; run: %atomic_cas_big_i8(0x12345678, 0, 0x12, 0xab) == 0xab345678
; run: %atomic_cas_big_i8(0x12345678, 0, 0x21, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 1, 0x34, 0xab) == 0x12ab5678
; run: %atomic_cas_big_i8(0x12345678, 1, 0x43, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 2, 0x56, 0xab) == 0x1234ab78
; run: %atomic_cas_big_i8(0x12345678, 2, 0x65, 0xab) == 0x12345678
; run: %atomic_cas_big_i8(0x12345678, 3, 0x78, 0xab) == 0x123456ab
; run: %atomic_cas_big_i8(0x12345678, 3, 0x87, 0xab) == 0x12345678

function %atomic_cas_little_i8(i32, i64, i8, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8, v3: i8):
    v4 = stack_addr.i64 ss0
    store.i32 little v0, v4

    v5 = iadd.i64 v4, v1
    v6 = atomic_cas.i8 little v5, v2, v3

    v7 = load.i32 little v4
    return v7
}
; run: %atomic_cas_little_i8(0x12345678, 3, 0x12, 0xab) == 0xab345678
; run: %atomic_cas_little_i8(0x12345678, 3, 0x21, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 2, 0x34, 0xab) == 0x12ab5678
; run: %atomic_cas_little_i8(0x12345678, 2, 0x43, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 1, 0x56, 0xab) == 0x1234ab78
; run: %atomic_cas_little_i8(0x12345678, 1, 0x65, 0xab) == 0x12345678
; run: %atomic_cas_little_i8(0x12345678, 0, 0x78, 0xab) == 0x123456ab
; run: %atomic_cas_little_i8(0x12345678, 0, 0x87, 0xab) == 0x12345678
cranelift/filetests/filetests/runtests/atomic-cas.clif (new file, 43 lines)
@@ -0,0 +1,43 @@
test run
target aarch64
target aarch64 has_lse
target x86_64
target s390x

; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly

function %atomic_cas_i64(i64, i64, i64) -> i64 {
    ss0 = explicit_slot 8

block0(v0: i64, v1: i64, v2: i64):
    stack_store.i64 v0, ss0

    v3 = stack_addr.i64 ss0
    v4 = atomic_cas.i64 v3, v1, v2

    v5 = stack_load.i64 ss0
    return v5
}
; run: %atomic_cas_i64(0, 0, 2) == 2
; run: %atomic_cas_i64(1, 0, 2) == 1
; run: %atomic_cas_i64(0, 1, 2) == 0
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF

function %atomic_cas_i32(i32, i32, i32) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i32, v2: i32):
    stack_store.i32 v0, ss0

    v3 = stack_addr.i32 ss0
    v4 = atomic_cas.i32 v3, v1, v2

    v5 = stack_load.i32 ss0
    return v5
}
; run: %atomic_cas_i32(0, 0, 2) == 2
; run: %atomic_cas_i32(1, 0, 2) == 1
; run: %atomic_cas_i32(0, 1, 2) == 0
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE
@@ -1,238 +0,0 @@
|
||||
test run
|
||||
target aarch64
|
||||
target aarch64 has_lse
|
||||
target x86_64
|
||||
; TODO: Merge this with atomic-rmw.clif when s390x supports it
|
||||
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 nand v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i64(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
|
||||
|
||||
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 nand v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i32(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_smin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i32(-1, -1) == -1
|
||||
; run: %atomic_rmw_smin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
cranelift/filetests/filetests/runtests/atomic-rmw-little.clif (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are right regarding atomicity, but we can
|
||||
; test if they perform their operation correctly
|
||||
|
||||
function %atomic_rmw_add_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little add v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_add_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_add_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_add_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_add_i64(1, 1) == 2
|
||||
; run: %atomic_rmw_add_i64(0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111) == 0xDECAFFFF_DECAFFFF
|
||||
|
||||
function %atomic_rmw_add_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little add v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_add_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_add_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_add_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_add_i32(1, 1) == 2
|
||||
; run: %atomic_rmw_add_i32(0xC0FFEEEE, 0x1DCB1111) == 0xDECAFFFF
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_sub_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little sub v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_sub_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_sub_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_sub_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_sub_i64(1, 1) == 0
|
||||
; run: %atomic_rmw_sub_i64(0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111) == 0xC0FFEEEE_C0FFEEEE
|
||||
|
||||
function %atomic_rmw_sub_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little sub v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_sub_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_sub_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_sub_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_sub_i32(1, 1) == 0
|
||||
; run: %atomic_rmw_sub_i32(0xDECAFFFF, 0x1DCB1111) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_and_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little and v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_and_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_and_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_and_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_and_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_and_i64(0xF1FFFEFE_FEEEFFFF, 0xCEFFEFEF_DFDBFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_and_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little and v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; run: %atomic_rmw_and_i32(0, 0) == 0
; run: %atomic_rmw_and_i32(1, 0) == 0
; run: %atomic_rmw_and_i32(0, 1) == 0
; run: %atomic_rmw_and_i32(1, 1) == 1
; run: %atomic_rmw_and_i32(0xF1FFFEFE, 0xCEFFEFEF) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_or_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little or v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_or_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_or_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_or_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_or_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_or_i64(0x80AAAAAA_8A8AAAAA, 0x40554444_54405555) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_or_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little or v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; run: %atomic_rmw_or_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_or_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_or_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_or_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_or_i32(0x80AAAAAA, 0x40554444) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xor_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little xor v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xor_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xor_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_xor_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xor_i64(1, 1) == 0
|
||||
; run: %atomic_rmw_xor_i64(0x8FA50A64_9440A07D, 0x4F5AE48A_4A8A5F82) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xor_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little xor v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xor_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xor_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_xor_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xor_i32(1, 1) == 0
|
||||
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little nand v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i64(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
|
||||
|
||||
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little nand v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i32(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little umin v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little umin v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little umax v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little umax v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little smin v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_smin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little smin v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i32(-1, -1) == -1
|
||||
; run: %atomic_rmw_smin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little smax v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little smax v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little xchg v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little xchg v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif (new file, 907 lines)
@@ -0,0 +1,907 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are right regarding atomicity, but we can
|
||||
; test if they perform their operation correctly
|
||||
|
||||
function %atomic_rmw_add_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big add v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0x1111) == 0x23455678
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0xffff) == 0x12335678
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0x1111) == 0x12346789
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0xffff) == 0x12345677
|
||||
|
||||
function %atomic_rmw_add_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little add v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0x1111) == 0x23455678
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0xffff) == 0x12335678
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0x1111) == 0x12346789
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0xffff) == 0x12345677
|
||||
|
||||
function %atomic_rmw_add_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big add v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0x11) == 0x23345678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0xff) == 0x11345678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0x11) == 0x12455678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0xff) == 0x12335678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0x11) == 0x12346778
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0xff) == 0x12345578
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0x11) == 0x12345689
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0xff) == 0x12345677
|
||||
|
||||
function %atomic_rmw_add_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little add v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0x11) == 0x23345678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0xff) == 0x11345678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0x11) == 0x12455678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0xff) == 0x12335678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0x11) == 0x12346778
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0xff) == 0x12345578
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0x11) == 0x12345689
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0xff) == 0x12345677
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_sub_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big sub v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0x1111) == 0x01235678
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0xffff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0x1111) == 0x12344567
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0xffff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little sub v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0x1111) == 0x01235678
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0xffff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0x1111) == 0x12344567
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0xffff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big sub v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0x11) == 0x01345678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0xff) == 0x13345678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0x11) == 0x12235678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0xff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0x11) == 0x12344578
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0xff) == 0x12345778
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0x11) == 0x12345667
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0xff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little sub v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0x11) == 0x01345678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0xff) == 0x13345678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0x11) == 0x12235678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0xff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0x11) == 0x12344578
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0xff) == 0x12345778
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0x11) == 0x12345667
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0xff) == 0x12345679
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_and_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big and v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0xf000) == 0x10005678
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0x000f) == 0x00045678
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0xf000) == 0x12345000
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0x000f) == 0x12340008
|
||||
|
||||
function %atomic_rmw_and_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little and v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0xf000) == 0x10005678
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0x000f) == 0x00045678
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0xf000) == 0x12345000
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0x000f) == 0x12340008
|
||||
|
||||
function %atomic_rmw_and_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big and v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0xf0) == 0x10345678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0x0f) == 0x02345678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0xf0) == 0x12305678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0x0f) == 0x12045678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0xf0) == 0x12345078
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0x0f) == 0x12340678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0xf0) == 0x12345670
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0x0f) == 0x12345608
|
||||
|
||||
function %atomic_rmw_and_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little and v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0xf0) == 0x10345678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0x0f) == 0x02345678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0xf0) == 0x12305678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0x0f) == 0x12045678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0xf0) == 0x12345078
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0x0f) == 0x12340678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0xf0) == 0x12345670
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0x0f) == 0x12345608
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_or_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big or v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0xf000) == 0xf2345678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0x000f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0xf000) == 0x1234f678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0x000f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little or v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0xf000) == 0xf2345678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0x000f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0xf000) == 0x1234f678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0x000f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big or v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0xf0) == 0xf2345678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0x0f) == 0x1f345678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0xf0) == 0x12f45678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0x0f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0xf0) == 0x1234f678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0x0f) == 0x12345f78
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0xf0) == 0x123456f8
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0x0f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little or v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0xf0) == 0xf2345678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0x0f) == 0x1f345678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0xf0) == 0x12f45678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0x0f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0xf0) == 0x1234f678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0x0f) == 0x12345f78
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0xf0) == 0x123456f8
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0x0f) == 0x1234567f
|
||||
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xor_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big xor v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0xf000) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0x000f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0xf000) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0x000f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little xor v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0xf000) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0x000f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0xf000) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0x000f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big xor v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0xf0) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0x0f) == 0x1d345678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0xf0) == 0x12c45678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0x0f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0xf0) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0x0f) == 0x12345978
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0xf0) == 0x12345688
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0x0f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little xor v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0xf0) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0x0f) == 0x1d345678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0xf0) == 0x12c45678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0x0f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0xf0) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0x0f) == 0x12345978
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0xf0) == 0x12345688
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0x0f) == 0x12345677
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_nand_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big nand v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0xf000) == 0xefff5678
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0x000f) == 0xfffb5678
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0xf000) == 0x1234afff
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0x000f) == 0x1234fff7
|
||||
|
||||
function %atomic_rmw_nand_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little nand v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0xf000) == 0xefff5678
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0x000f) == 0xfffb5678
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0xf000) == 0x1234afff
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0x000f) == 0x1234fff7
|
||||
|
||||
function %atomic_rmw_nand_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big nand v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0xf0) == 0xef345678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0x0f) == 0xfd345678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0xf0) == 0x12cf5678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0x0f) == 0x12fb5678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0xf0) == 0x1234af78
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0x0f) == 0x1234f978
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0xf0) == 0x1234568f
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0x0f) == 0x123456f7
|
||||
|
||||
function %atomic_rmw_nand_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little nand v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0xf0) == 0xef345678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0x0f) == 0xfd345678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0xf0) == 0x12cf5678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0x0f) == 0x12fb5678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0xf0) == 0x1234af78
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0x0f) == 0x1234f978
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0xf0) == 0x1234568f
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0x0f) == 0x123456f7
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big umin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little umin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big umin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little umin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big umax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_umax_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little umax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_umax_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big umax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0xff) == 0x123456ff
|
||||
|
||||
function %atomic_rmw_umax_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little umax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0xff) == 0x123456ff
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big smin v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff

function %atomic_rmw_smin_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little smin v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff

function %atomic_rmw_smin_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big smin v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0xff) == 0x123456ff

function %atomic_rmw_smin_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little smin v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0xff) == 0x123456ff


function %atomic_rmw_smax_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big smax v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0x7fff) == 0x12347fff

function %atomic_rmw_smax_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little smax v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0x7fff) == 0x12347fff

function %atomic_rmw_smax_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big smax v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0x7f) == 0x1234567f

function %atomic_rmw_smax_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little smax v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0x7f) == 0x1234567f


function %atomic_rmw_xchg_big_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 big xchg v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff

function %atomic_rmw_xchg_little_i16(i32, i64, i16) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i16 little xchg v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff

function %atomic_rmw_xchg_big_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 big v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 big xchg v4, v2

v6 = load.i32 big v3
return v6
}
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0xff) == 0x123456ff

function %atomic_rmw_xchg_little_i8(i32, i64, i8) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
v3 = stack_addr.i64 ss0
store.i32 little v0, v3

v4 = iadd.i64 v3, v1
v5 = atomic_rmw.i8 little xchg v4, v2

v6 = load.i32 little v3
return v6
}
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0xff) == 0x123456ff

@@ -196,3 +196,237 @@ block0(v0: i32, v1: i32):
; run: %atomic_rmw_xor_i32(0, 1) == 1
; run: %atomic_rmw_xor_i32(1, 1) == 0
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE


function %atomic_rmw_nand_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 nand v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_nand_i64(0, 0) == -1
; run: %atomic_rmw_nand_i64(1, 0) == -1
; run: %atomic_rmw_nand_i64(0, 1) == -1
; run: %atomic_rmw_nand_i64(1, 1) == -2
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E

function %atomic_rmw_nand_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 nand v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_nand_i32(0, 0) == -1
; run: %atomic_rmw_nand_i32(1, 0) == -1
; run: %atomic_rmw_nand_i32(0, 1) == -1
; run: %atomic_rmw_nand_i32(1, 1) == -2
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F


function %atomic_rmw_umin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umin v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umin_i64(0, 0) == 0
; run: %atomic_rmw_umin_i64(1, 0) == 0
; run: %atomic_rmw_umin_i64(0, 1) == 0
; run: %atomic_rmw_umin_i64(1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, 1) == 1
; run: %atomic_rmw_umin_i64(-1, -3) == -3

function %atomic_rmw_umin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umin v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umin_i32(0, 0) == 0
; run: %atomic_rmw_umin_i32(1, 0) == 0
; run: %atomic_rmw_umin_i32(0, 1) == 0
; run: %atomic_rmw_umin_i32(1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, 1) == 1
; run: %atomic_rmw_umin_i32(-1, -3) == -3


function %atomic_rmw_umax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 umax v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_umax_i64(0, 0) == 0
; run: %atomic_rmw_umax_i64(1, 0) == 1
; run: %atomic_rmw_umax_i64(0, 1) == 1
; run: %atomic_rmw_umax_i64(1, 1) == 1
; run: %atomic_rmw_umax_i64(-1, 1) == -1
; run: %atomic_rmw_umax_i64(-1, -3) == -1

function %atomic_rmw_umax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 umax v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_umax_i32(0, 0) == 0
; run: %atomic_rmw_umax_i32(1, 0) == 1
; run: %atomic_rmw_umax_i32(0, 1) == 1
; run: %atomic_rmw_umax_i32(1, 1) == 1
; run: %atomic_rmw_umax_i32(-1, 1) == -1
; run: %atomic_rmw_umax_i32(-1, -3) == -1


function %atomic_rmw_smin_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smin v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smin_i64(0, 0) == 0
; run: %atomic_rmw_smin_i64(1, 0) == 0
; run: %atomic_rmw_smin_i64(0, 1) == 0
; run: %atomic_rmw_smin_i64(1, 1) == 1
; run: %atomic_rmw_smin_i64(-1, 1) == -1
; run: %atomic_rmw_smin_i64(-1, -3) == -3

function %atomic_rmw_smin_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smin v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smin_i32(0, 0) == 0
; run: %atomic_rmw_smin_i32(1, 0) == 0
; run: %atomic_rmw_smin_i32(0, 1) == 0
; run: %atomic_rmw_smin_i32(1, 1) == 1
; run: %atomic_rmw_smin_i32(-1, 1) == -1
; run: %atomic_rmw_smin_i32(-1, -3) == -3


function %atomic_rmw_smax_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 smax v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_smax_i64(0, 0) == 0
; run: %atomic_rmw_smax_i64(1, 0) == 1
; run: %atomic_rmw_smax_i64(0, 1) == 1
; run: %atomic_rmw_smax_i64(1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, 1) == 1
; run: %atomic_rmw_smax_i64(-1, -3) == -1

function %atomic_rmw_smax_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 smax v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_smax_i32(0, 0) == 0
; run: %atomic_rmw_smax_i32(1, 0) == 1
; run: %atomic_rmw_smax_i32(0, 1) == 1
; run: %atomic_rmw_smax_i32(1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, 1) == 1
; run: %atomic_rmw_smax_i32(-1, -3) == -1


function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
ss0 = explicit_slot 8

block0(v0: i64, v1: i64):
stack_store.i64 v0, ss0

v2 = stack_addr.i64 ss0
v3 = atomic_rmw.i64 xchg v2, v1

v4 = stack_load.i64 ss0
return v4
}
; run: %atomic_rmw_xchg_i64(0, 0) == 0
; run: %atomic_rmw_xchg_i64(1, 0) == 0
; run: %atomic_rmw_xchg_i64(0, 1) == 1
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF

function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
ss0 = explicit_slot 4

block0(v0: i32, v1: i32):
stack_store.i32 v0, ss0

v2 = stack_addr.i32 ss0
v3 = atomic_rmw.i32 xchg v2, v1

v4 = stack_load.i32 ss0
return v4
}
; run: %atomic_rmw_xchg_i32(0, 0) == 0
; run: %atomic_rmw_xchg_i32(1, 0) == 0
; run: %atomic_rmw_xchg_i32(0, 1) == 1
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE