* Added a `mem_flags` parameter to `State::checked_{load,store}` as the means of
  determining endianness; the flags are typically derived from an instruction.
* Added a `native_endianness` property to `InterpreterState` as a fallback when
  determining endianness, for cases where no memory flags are available or set
  (see the sketch after this list).
* Added `to_be` and `to_le` methods to `DataValue`.
* Added `AtomicCas` and `AtomicRmw` to the list of instructions whose memory flags
  can be retrieved via `InstructionData::memflags`.
* Enabled `atomic-{cas,rmw}-subword-{big,little}.clif` for interpreter run
tests.
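
The sketch below illustrates the selection logic described above: prefer an endianness carried by the instruction's memory flags, and fall back to the interpreter's native endianness otherwise. It is only an illustration under assumed names; the `Endianness` enum and the `resolve_endianness` and `convert_u32` helpers are invented for the example and are not the actual Cranelift API.

```rust
// Illustrative sketch only; the real Cranelift types and signatures differ.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Endianness {
    Big,
    Little,
}

/// Prefer an endianness explicitly set on the instruction's memory flags
/// (the new `mem_flags` parameter); otherwise fall back to the interpreter's
/// native endianness.
fn resolve_endianness(
    flag_endianness: Option<Endianness>,
    native_endianness: Endianness,
) -> Endianness {
    flag_endianness.unwrap_or(native_endianness)
}

/// Byte-order conversion in the spirit of `DataValue::{to_be,to_le}`:
/// swap bytes only when the requested order differs from the current one.
fn convert_u32(value: u32, from: Endianness, to: Endianness) -> u32 {
    if from == to {
        value
    } else {
        value.swap_bytes()
    }
}

fn main() {
    // No flags available: fall back to the native byte order.
    assert_eq!(
        resolve_endianness(None, Endianness::Little),
        Endianness::Little
    );
    // A little-endian value reinterpreted as big-endian has its bytes swapped.
    assert_eq!(
        convert_u32(0x1234_5678, Endianness::Little, Endianness::Big),
        0x7856_3412
    );
    println!("endianness fallback sketch ok");
}
```

For reference, the little-endian `atomic_rmw` subword test that now runs under the interpreter is reproduced below.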
test interpret
test run
target s390x
target s390x has_mie2
target aarch64
target aarch64 has_lse
target x86_64
target riscv64

; We can't test that these instructions are right regarding atomicity, but we can
; test if they perform their operation correctly
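; As a worked example of how to read the expectations below: storing the i32 0x12345678
; little-endian gives the byte layout [0x78, 0x56, 0x34, 0x12], so offset 2 addresses the
; i16 halfword 0x1234; adding 0x1111 yields 0x2345, and reloading the whole word as a
; little-endian i32 gives 0x23455678.
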
function %atomic_rmw_add_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little add v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0x1111) == 0x23455678
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0xffff) == 0x12335678
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0x1111) == 0x12346789
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0xffff) == 0x12345677

function %atomic_rmw_add_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little add v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0x11) == 0x23345678
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0xff) == 0x11345678
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0x11) == 0x12455678
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0xff) == 0x12335678
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0x11) == 0x12346778
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0xff) == 0x12345578
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0x11) == 0x12345689
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0xff) == 0x12345677

function %atomic_rmw_sub_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little sub v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0x1111) == 0x01235678
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0xffff) == 0x12355678
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0x1111) == 0x12344567
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0xffff) == 0x12345679

function %atomic_rmw_sub_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little sub v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0x11) == 0x01345678
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0xff) == 0x13345678
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0x11) == 0x12235678
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0xff) == 0x12355678
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0x11) == 0x12344578
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0xff) == 0x12345778
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0x11) == 0x12345667
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0xff) == 0x12345679

function %atomic_rmw_and_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little and v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0xf000) == 0x10005678
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0x000f) == 0x00045678
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0xf000) == 0x12345000
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0x000f) == 0x12340008

function %atomic_rmw_and_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little and v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0xf0) == 0x10345678
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0x0f) == 0x02345678
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0xf0) == 0x12305678
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0x0f) == 0x12045678
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0xf0) == 0x12345078
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0x0f) == 0x12340678
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0xf0) == 0x12345670
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0x0f) == 0x12345608

function %atomic_rmw_or_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little or v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0xf000) == 0xf2345678
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0x000f) == 0x123f5678
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0xf000) == 0x1234f678
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0x000f) == 0x1234567f

function %atomic_rmw_or_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little or v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0xf0) == 0xf2345678
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0x0f) == 0x1f345678
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0xf0) == 0x12f45678
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0x0f) == 0x123f5678
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0xf0) == 0x1234f678
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0x0f) == 0x12345f78
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0xf0) == 0x123456f8
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0x0f) == 0x1234567f

function %atomic_rmw_xor_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little xor v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0xf000) == 0xe2345678
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0x000f) == 0x123b5678
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0xf000) == 0x1234a678
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0x000f) == 0x12345677

function %atomic_rmw_xor_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little xor v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0xf0) == 0xe2345678
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0x0f) == 0x1d345678
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0xf0) == 0x12c45678
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0x0f) == 0x123b5678
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0xf0) == 0x1234a678
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0x0f) == 0x12345978
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0xf0) == 0x12345688
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0x0f) == 0x12345677

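; nand stores the complement of the bitwise AND, e.g. ~(0x1234 & 0xf000) = 0xefff.
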
function %atomic_rmw_nand_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little nand v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0xf000) == 0xefff5678
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0x000f) == 0xfffb5678
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0xf000) == 0x1234afff
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0x000f) == 0x1234fff7

function %atomic_rmw_nand_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little nand v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0xf0) == 0xef345678
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0x0f) == 0xfd345678
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0xf0) == 0x12cf5678
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0x0f) == 0x12fb5678
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0xf0) == 0x1234af78
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0x0f) == 0x1234f978
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0xf0) == 0x1234568f
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0x0f) == 0x123456f7

function %atomic_rmw_umin_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little umin v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0xffff) == 0x12345678

function %atomic_rmw_umin_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little umin v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0xff) == 0x12345678

function %atomic_rmw_umax_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little umax v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0x1111) == 0x12345678
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff

function %atomic_rmw_umax_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little umax v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0x11) == 0x12345678
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0xff) == 0x123456ff

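; In the signed variants below, an all-ones value such as 0xffff (i16) or 0xff (i8) is
; interpreted as -1, so smin selects it while smax keeps the stored value unchanged.
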
function %atomic_rmw_smin_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little smin v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff

function %atomic_rmw_smin_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little smin v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0xff) == 0x123456ff

function %atomic_rmw_smax_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little smax v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0x7fff) == 0x7fff5678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0xffff) == 0x12345678
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0x7fff) == 0x12347fff

function %atomic_rmw_smax_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little smax v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0x7f) == 0x7f345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0x7f) == 0x127f5678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0x7f) == 0x12347f78
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0xff) == 0x12345678
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0x7f) == 0x1234567f

function %atomic_rmw_xchg_little_i16(i32, i64, i16) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i16):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i16 little xchg v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff

function %atomic_rmw_xchg_little_i8(i32, i64, i8) -> i32 {
    ss0 = explicit_slot 4

block0(v0: i32, v1: i64, v2: i8):
    v3 = stack_addr.i64 ss0
    store.i32 little v0, v3

    v4 = iadd.i64 v3, v1
    v5 = atomic_rmw.i8 little xchg v4, v2

    v6 = load.i32 little v3
    return v6
}
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0x11) == 0x11345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0xff) == 0xff345678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0x11) == 0x12115678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0x11) == 0x12341178
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0x11) == 0x12345611
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0xff) == 0x123456ff