test run
target s390x
target s390x has_mie2

; We can't test that these instructions are actually atomic, but we can
; test that they perform their operation correctly.
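;
; Every function below follows the same pattern: store a known 32-bit
; value big-endian into a 4-byte stack slot, apply a subword atomic
; operation at the byte offset given by the second argument, then reload
; the whole word big-endian so each run line can check exactly which
; bytes changed. The old value returned by atomic_rmw (v5) is
; intentionally unused; only the memory effect is verified.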

function %atomic_rmw_add_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big add v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0x1111) == 0x23455678
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0xffff) == 0x12335678
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0x1111) == 0x12346789
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0xffff) == 0x12345677
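; With big-endian storage the byte offsets count down from the most
; significant end: offset 0 addresses the halfword 0x1234 and offset 2
; the halfword 0x5678. So 0x1234 + 0x1111 = 0x2345 yields 0x23455678,
; and 0x5678 + 0x1111 = 0x6789 yields 0x12346789.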

function %atomic_rmw_add_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big add v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0x11) == 0x23345678
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0xff) == 0x11345678
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0x11) == 0x12455678
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0xff) == 0x12335678
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0x11) == 0x12346778
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0xff) == 0x12345578
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0x11) == 0x12345689
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0xff) == 0x12345677

function %atomic_rmw_sub_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big sub v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0x1111) == 0x01235678
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0xffff) == 0x12355678
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0x1111) == 0x12344567
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0xffff) == 0x12345679
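; Subtraction wraps modulo 2^16: 0x1234 - 0xffff = 0x1235, the same as
; adding 1, since 0xffff is -1 in two's complement.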

function %atomic_rmw_sub_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big sub v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0x11) == 0x01345678
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0xff) == 0x13345678
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0x11) == 0x12235678
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0xff) == 0x12355678
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0x11) == 0x12344578
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0xff) == 0x12345778
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0x11) == 0x12345667
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0xff) == 0x12345679

function %atomic_rmw_and_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big and v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0xf000) == 0x10005678
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0x000f) == 0x00045678
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0xf000) == 0x12345000
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0x000f) == 0x12340008

function %atomic_rmw_and_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big and v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0xf0) == 0x10345678
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0x0f) == 0x02345678
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0xf0) == 0x12305678
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0x0f) == 0x12045678
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0xf0) == 0x12345078
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0x0f) == 0x12340678
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0xf0) == 0x12345670
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0x0f) == 0x12345608

function %atomic_rmw_or_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big or v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0xf000) == 0xf2345678
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0x000f) == 0x123f5678
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0xf000) == 0x1234f678
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0x000f) == 0x1234567f

function %atomic_rmw_or_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big or v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0xf0) == 0xf2345678
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0x0f) == 0x1f345678
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0xf0) == 0x12f45678
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0x0f) == 0x123f5678
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0xf0) == 0x1234f678
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0x0f) == 0x12345f78
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0xf0) == 0x123456f8
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0x0f) == 0x1234567f

function %atomic_rmw_xor_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big xor v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0xf000) == 0xe2345678
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0x000f) == 0x123b5678
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0xf000) == 0x1234a678
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0x000f) == 0x12345677

function %atomic_rmw_xor_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big xor v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0xf0) == 0xe2345678
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0x0f) == 0x1d345678
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0xf0) == 0x12c45678
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0x0f) == 0x123b5678
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0xf0) == 0x1234a678
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0x0f) == 0x12345978
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0xf0) == 0x12345688
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0x0f) == 0x12345677

function %atomic_rmw_nand_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big nand v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0xf000) == 0xefff5678
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0x000f) == 0xfffb5678
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0xf000) == 0x1234afff
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0x000f) == 0x1234fff7
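; nand stores the complement of the AND: for example
; ~(0x1234 & 0xf000) = ~0x1000 = 0xefff, hence 0xefff5678.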

function %atomic_rmw_nand_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big nand v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0xf0) == 0xef345678
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0x0f) == 0xfd345678
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0xf0) == 0x12cf5678
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0x0f) == 0x12fb5678
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0xf0) == 0x1234af78
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0x0f) == 0x1234f978
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0xf0) == 0x1234568f
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0x0f) == 0x123456f7

function %atomic_rmw_umin_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big umin v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0xffff) == 0x12345678
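; For the unsigned comparisons 0xffff is the largest i16 value, so umin
; never selects it and the memory word is left unchanged.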

function %atomic_rmw_umin_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big umin v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0xff) == 0x12345678

function %atomic_rmw_umax_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big umax v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff

function %atomic_rmw_umax_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big umax v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0x11) == 0x12345678
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0xff) == 0x123456ff

function %atomic_rmw_smin_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big smin v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
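; For the signed comparisons 0xffff is -1, so smin always selects it,
; in contrast to umin above where 0xffff was the maximum.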

function %atomic_rmw_smin_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big smin v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0xff) == 0x123456ff

function %atomic_rmw_smax_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big smax v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0x7fff) == 0x12347fff

function %atomic_rmw_smax_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big smax v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0x7f) == 0x1234567f

function %atomic_rmw_xchg_big_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 big xchg v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff

function %atomic_rmw_xchg_big_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 big v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 big xchg v4, v2

    v6 = load.i32 big v3
    return v6
}
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0xff) == 0x123456ff