s390x: Add support for all remaining atomic operations (#3746)
This adds support for all atomic operations that were unimplemented so far in the s390x back end:
- atomic_rmw operations xchg, nand, smin, smax, umin, umax
- $I8 and $I16 versions of atomic_rmw and atomic_cas
- little-endian versions of atomic_rmw and atomic_cas

All of these have to be implemented by a compare-and-swap loop, and for the $I8 and $I16 versions the actual atomic instruction needs to operate on the surrounding aligned 32-bit word.

Since we cannot emit new control flow during ISLE instruction selection, these compare-and-swap loops are emitted as a single meta-instruction to be expanded at emit time. However, since a large number of different versions of the loop is required to implement all the above operations, I've implemented a facility to allow specifying the loop bodies from within ISLE after all, by creating a vector of MInst structures that will be emitted as part of the meta-instruction. There are still restrictions, in particular instructions that are part of the loop body may not modify any virtual register. But even so, this approach looks preferable to doing everything in emit.rs.

A few instructions needed in those compare-and-swap loop bodies were added as well, in particular the RxSBG family of instructions as well as the LOAD REVERSED in-register byte-swap instructions.

This patch also adds filetest runtests to verify the semantics of all operations, in particular the subword and little-endian variants (those are currently only executed on s390x).
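For illustration only (not part of the patch): a minimal Rust sketch of the semantics that the generated subword compare-and-swap loop implements, shown here for an 8-bit atomic add on a big-endian host using the enclosing aligned 32-bit word. The function name and the use of AtomicU32 are assumptions of the sketch; the back end itself emits this loop as a single Loop meta-instruction at emit time.

    use std::sync::atomic::{AtomicU32, Ordering};

    // Illustration only: not the back end's generated code.
    // 8-bit atomic add via a 32-bit compare-and-swap loop on the
    // surrounding aligned word (big-endian byte numbering, as on s390x).
    unsafe fn atomic_add_u8(addr: *mut u8, operand: u8) -> u8 {
        // Address of the enclosing aligned 32-bit word and the target byte's lane.
        let word = &*((addr as usize & !3) as *const AtomicU32);
        let shift = (8 * (3 - (addr as usize & 3))) as u32; // big-endian lane shift

        let mut old = word.load(Ordering::Relaxed);
        loop {
            // Extract the target byte, apply the operation, and splice the new
            // byte back into the word; the other three bytes stay unchanged.
            let old_byte = ((old >> shift) & 0xff) as u8;
            let new_byte = old_byte.wrapping_add(operand);
            let new = (old & !(0xffu32 << shift)) | ((new_byte as u32) << shift);
            match word.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(_) => return old_byte,      // atomic_rmw returns the previous value
                Err(current) => old = current, // lost the race: retry with the fresh value
            }
        }
    }

The $I16 and little-endian variants follow the same pattern, with additional rotate and byte-swap steps to bring the target bytes into position, as described above.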
build.rs
@@ -173,8 +173,6 @@ fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
|
||||
// No simd support yet for s390x.
|
||||
("simd", _) if platform_is_s390x() => return true,
|
||||
("memory64", "simd") if platform_is_s390x() => return true,
|
||||
// No full atomics support yet for s390x.
|
||||
("memory64", "threads") if platform_is_s390x() => return true,
|
||||
_ => {}
|
||||
},
|
||||
_ => panic!("unrecognized strategy"),
|
||||
|
||||
@@ -112,6 +112,26 @@
|
||||
(shift_imm u8)
|
||||
(shift_reg Reg))
|
||||
|
||||
;; A rotate-then-<op>-selected-bits instruction with a register
|
||||
;; in/out-operand, another register source, and three immediates.
|
||||
(RxSBG
|
||||
(op RxSBGOp)
|
||||
(rd WritableReg)
|
||||
(rn Reg)
|
||||
(start_bit u8)
|
||||
(end_bit u8)
|
||||
(rotate_amt i8))
|
||||
|
||||
;; The test-only version of RxSBG, which does not modify any register
|
||||
;; but only sets the condition code.
|
||||
(RxSBGTest
|
||||
(op RxSBGOp)
|
||||
(rd Reg)
|
||||
(rn Reg)
|
||||
(start_bit u8)
|
||||
(end_bit u8)
|
||||
(rotate_amt i8))
|
||||
|
||||
;; A unary operation with a register source and a register destination.
|
||||
(UnaryRR
|
||||
(op UnaryOp)
|
||||
@@ -658,6 +678,19 @@
|
||||
(rd WritableReg)
|
||||
(mem MemArg))
|
||||
|
||||
;; Meta-instruction to emit a loop around a sequence of instructions.
|
||||
;; This control flow is not visible to the compiler core, in particular
|
||||
;; the register allocator. Therefore, instructions in the loop may not
|
||||
;; write to any virtual register, so any writes must use reserved hard
|
||||
;; registers (e.g. %r0, %r1). *Reading* virtual registers is OK.
|
||||
(Loop
|
||||
(body VecMInst)
|
||||
(cond Cond))
|
||||
|
||||
;; Conditional branch breaking out of a loop emitted via Loop.
|
||||
(CondBreak
|
||||
(cond Cond))
|
||||
|
||||
;; Marker, no-op in generated code: SP "virtual offset" is adjusted. This
|
||||
;; controls how MemArg::NominalSPOffset args are lowered.
|
||||
(VirtualSPOffsetAdj
|
||||
@@ -732,6 +765,8 @@
|
||||
(Neg64Ext32)
|
||||
(PopcntByte)
|
||||
(PopcntReg)
|
||||
(BSwap32)
|
||||
(BSwap64)
|
||||
))
|
||||
|
||||
;; A shift operation.
|
||||
@@ -747,6 +782,15 @@
|
||||
(AShR64)
|
||||
))
|
||||
|
||||
;; A rotate-then-<op>-selected-bits operation.
|
||||
(type RxSBGOp
|
||||
(enum
|
||||
(Insert)
|
||||
(And)
|
||||
(Or)
|
||||
(Xor)
|
||||
))
|
||||
|
||||
;; An integer comparison operation.
|
||||
(type CmpOp
|
||||
(enum
|
||||
@@ -1395,6 +1439,13 @@
|
||||
(_ Unit (emit (MInst.ShiftRR op dst src shift_imm shift_reg))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Helper for emitting `MInst.RxSBGTest` instructions.
|
||||
(decl rxsbg_test (RxSBGOp Reg Reg u8 u8 i8) ProducesFlags)
|
||||
(rule (rxsbg_test op src1 src2 start_bit end_bit rotate_amt)
|
||||
(ProducesFlags.ProducesFlags (MInst.RxSBGTest op src1 src2
|
||||
start_bit end_bit rotate_amt)
|
||||
(invalid_reg)))
|
||||
|
||||
;; Helper for emitting `MInst.UnaryRR` instructions.
|
||||
(decl unary_rr (Type UnaryOp Reg) Reg)
|
||||
(rule (unary_rr ty op src)
|
||||
@@ -1719,6 +1770,95 @@
|
||||
result))
|
||||
|
||||
|
||||
;; Helpers for instruction sequences ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; Completed instruction sequence for use in MInst.Loop.
|
||||
(type VecMInst (primitive VecMInst))
|
||||
|
||||
;; Partial (mutable) instruction sequence in the process of being created.
|
||||
(type VecMInstBuilder extern (enum))
|
||||
|
||||
;; Create a new empty instruction sequence builder.
|
||||
(decl inst_builder_new () VecMInstBuilder)
|
||||
(extern constructor inst_builder_new inst_builder_new)
|
||||
|
||||
;; Push an instruction to a sequence under construction.
|
||||
(decl inst_builder_push (VecMInstBuilder MInst) Unit)
|
||||
(extern constructor inst_builder_push inst_builder_push)
|
||||
|
||||
;; Complete the sequence under construction.
|
||||
(decl inst_builder_finish (VecMInstBuilder) VecMInst)
|
||||
(extern constructor inst_builder_finish inst_builder_finish)
|
||||
|
||||
;; It is not safe to write to virtual registers in the loop, so all destination
|
||||
;; registers must be real. This must be handled by the user of these helpers,
|
||||
;; so we simply verify this constraint here.
|
||||
(decl real_reg (WritableReg) WritableReg)
|
||||
(extern extractor real_reg real_reg)
|
||||
|
||||
;; Similarly, because we cannot allocate temp registers, if an instruction
|
||||
;; requires matching source and destination registers, this needs to be handled
|
||||
;; by the user. Another helper to verify that constraint.
|
||||
(decl same_reg (WritableReg) Reg)
|
||||
(extern extractor same_reg same_reg (in))
|
||||
|
||||
;; Push a `MInst.AluRRR` instruction to a sequence.
|
||||
(decl push_alu_reg (VecMInstBuilder ALUOp WritableReg Reg Reg) Reg)
|
||||
(rule (push_alu_reg ib op (real_reg dst) src1 src2)
|
||||
(let ((_ Unit (inst_builder_push ib (MInst.AluRRR op dst src1 src2))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Push a `MInst.AluRUImm32Shifted` instruction to a sequence.
|
||||
(decl push_alu_uimm32shifted (VecMInstBuilder ALUOp WritableReg Reg UImm32Shifted) Reg)
|
||||
(rule (push_alu_uimm32shifted ib op (real_reg dst) (same_reg <dst) imm)
|
||||
(let ((_ Unit (inst_builder_push ib (MInst.AluRUImm32Shifted op dst imm))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Push a `MInst.ShiftRR` instruction to a sequence.
|
||||
(decl push_shift (VecMInstBuilder ShiftOp WritableReg Reg u8 Reg) Reg)
|
||||
(rule (push_shift ib op (real_reg dst) src shift_imm shift_reg)
|
||||
(let ((_ Unit (inst_builder_push ib
|
||||
(MInst.ShiftRR op dst src shift_imm shift_reg))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Push a `MInst.RxSBG` instruction to a sequence.
|
||||
(decl push_rxsbg (VecMInstBuilder RxSBGOp WritableReg Reg Reg u8 u8 i8) Reg)
|
||||
(rule (push_rxsbg ib op (real_reg dst) (same_reg <dst) src start_bit end_bit rotate_amt)
|
||||
(let ((_ Unit (inst_builder_push ib
|
||||
(MInst.RxSBG op dst src start_bit end_bit rotate_amt))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Push a `MInst.UnaryRR` instruction to a sequence.
|
||||
(decl push_unary (VecMInstBuilder UnaryOp WritableReg Reg) Reg)
|
||||
(rule (push_unary ib op (real_reg dst) src)
|
||||
(let ((_ Unit (inst_builder_push ib (MInst.UnaryRR op dst src))))
|
||||
(writable_reg_to_reg dst)))
|
||||
|
||||
;; Push a `MInst.AtomicCas32` instruction to a sequence.
|
||||
(decl push_atomic_cas32 (VecMInstBuilder WritableReg Reg MemArg) Reg)
|
||||
(rule (push_atomic_cas32 ib (real_reg dst_src1) src2 mem)
|
||||
(let ((_ Unit (inst_builder_push ib (MInst.AtomicCas32 dst_src1 src2 mem))))
|
||||
(writable_reg_to_reg dst_src1)))
|
||||
|
||||
;; Push a `MInst.AtomicCas64` instruction to a sequence.
|
||||
(decl push_atomic_cas64 (VecMInstBuilder WritableReg Reg MemArg) Reg)
|
||||
(rule (push_atomic_cas64 ib (real_reg dst_src1) src2 mem)
|
||||
(let ((_ Unit (inst_builder_push ib (MInst.AtomicCas64 dst_src1 src2 mem))))
|
||||
(writable_reg_to_reg dst_src1)))
|
||||
|
||||
;; Push instructions to break out of the loop if condition is met.
|
||||
(decl push_break_if (VecMInstBuilder ProducesFlags Cond) Reg)
|
||||
(rule (push_break_if ib (ProducesFlags.ProducesFlags inst result) cond)
|
||||
(let ((_1 Unit (inst_builder_push ib inst))
|
||||
(_2 Unit (inst_builder_push ib (MInst.CondBreak cond))))
|
||||
result))
|
||||
|
||||
;; Emit a `MInst.Loop` instruction holding a loop body instruction sequence.
|
||||
(decl emit_loop (VecMInstBuilder Cond) Unit)
|
||||
(rule (emit_loop ib cond)
|
||||
(emit (MInst.Loop (inst_builder_finish ib) cond)))
|
||||
|
||||
|
||||
;; Helpers for generating register moves ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; Move source register into destination. (Non-SSA form.)
|
||||
@@ -1747,6 +1887,13 @@
|
||||
(decl copy_reg (Type Reg) Reg)
|
||||
(rule (copy_reg ty reg) (writable_reg_to_reg (copy_writable_reg ty reg)))
|
||||
|
||||
;; Move from memory location into destination.
|
||||
(decl emit_load (Type WritableReg MemArg) Unit)
|
||||
(rule (emit_load $I32 dst addr)
|
||||
(emit (MInst.Load32 dst addr)))
|
||||
(rule (emit_load $I64 dst addr)
|
||||
(emit (MInst.Load64 dst addr)))
|
||||
|
||||
|
||||
;; Helpers for generating immediate values ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
@@ -2249,6 +2396,123 @@
|
||||
(trap_if_impl cond trap_code)))
|
||||
|
||||
|
||||
;;;; Helpers for compare-and-swap loops ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; We use the emit_loop functionality to create compare-and-swap loops.
|
||||
;; As noted there, code inside a loop emitted via emit_loop cannot write
|
||||
;; to any virtual register, only hard registers. We use the two reserved
|
||||
;; registers %r0 and %r1 in compare-and-swap loops.
|
||||
|
||||
;; %r0 always holds the value currently loaded from the memory location.
|
||||
(decl casloop_val_reg () WritableReg)
|
||||
(rule (casloop_val_reg) (writable_gpr 0))
|
||||
|
||||
;; %r1 is available to compute the new value to be written.
|
||||
(decl casloop_tmp_reg () WritableReg)
|
||||
(rule (casloop_tmp_reg) (writable_gpr 1))
|
||||
|
||||
;; This takes a loop body for a compare-and-swap loop, completes it by
|
||||
;; adding the actual compare-and-swap instruction, and emits the initial
|
||||
;; memory load followed by the loop itself. "val" is the new value to
|
||||
;; be written if the memory location still holds the old value in %r0.
|
||||
;; The result should be passed to "casloop_result" or (in the case of
|
||||
;; subword loops) to "casloop_rotate_result".
|
||||
(decl casloop_emit (VecMInstBuilder Type MemFlags Reg Reg) Reg)
|
||||
(rule (casloop_emit ib ty flags aligned_addr val)
|
||||
(let (;; Construct a memory argument for the aligned word.
|
||||
(aligned_mem MemArg (memarg_reg_plus_off aligned_addr 0 flags))
|
||||
;; Add the compare-and-swap instruction to the builder.
|
||||
(result Reg (push_atomic_cas ib (ty_ext32 ty)
|
||||
(casloop_val_reg) val aligned_mem))
|
||||
;; Emit initial load followed by compare-and-swap loop.
|
||||
(_1 Unit (emit_load (ty_ext32 ty) (casloop_val_reg) aligned_mem))
|
||||
(_2 Unit (emit_loop ib (intcc_as_cond (IntCC.NotEqual)))))
|
||||
result))
|
||||
|
||||
;; Compute the previous memory value after a (fullword) compare-and-swap loop.
|
||||
;; In the big-endian case, the value is already correct, but may need to be
|
||||
;; copied out of the hard register. In the little-endian case, we need to
|
||||
;; byte-swap since the compare-and-swap instruction is always big-endian.
|
||||
(decl casloop_result (Type MemFlags Reg) Reg)
|
||||
(rule (casloop_result (ty_32_or_64 ty) (bigendian) result)
|
||||
(copy_reg ty result))
|
||||
(rule (casloop_result (ty_32_or_64 ty) (littleendian) result)
|
||||
(bswap_reg ty result))
|
||||
|
||||
;; Emit a fullword compare-and-swap loop, returning the previous memory value.
|
||||
(decl casloop (VecMInstBuilder Type MemFlags Reg Reg) Reg)
|
||||
(rule (casloop ib ty flags aligned_addr val)
|
||||
(casloop_result ty flags (casloop_emit ib ty flags aligned_addr val)))
|
||||
|
||||
;; For types smaller than $I32, we have no native compare-and-swap
|
||||
;; instruction, so we need to perform the compare-and-swap loop on the
|
||||
;; surrounding aligned word. To actually operate on the target $I8 or
|
||||
;; $I16 data, that aligned word then needs to be rotated by an amount
|
||||
;; determined by the low address bits.
|
||||
|
||||
;; Determine the rotate amount to bring the target data into a position
|
||||
;; in the high bytes of the enclosing $I32. Since the compare-and-swap
|
||||
;; instruction performs a big-endian memory access, this can be done by
|
||||
;; rotating (left) by "(addr & 3) * 8" bits, or "(addr << 3) & 31" bits.
|
||||
;; We can omit the "& 31" since this is implicit with a 32-bit rotate.
|
||||
(decl casloop_bitshift (Reg) Reg)
|
||||
(rule (casloop_bitshift addr)
|
||||
(lshl_imm $I32 addr 3))
|
||||
|
||||
;; The address of the surrounding 32-bit word, by masking off low bits.
|
||||
(decl casloop_aligned_addr (Reg) Reg)
|
||||
(rule (casloop_aligned_addr addr)
|
||||
(and_uimm16shifted $I64 addr (uimm16shifted 0xfffc 0)))
|
||||
|
||||
;; Push an instruction sequence to rotate a value loaded from memory
|
||||
;; to the well-defined location: the high bytes in case of a big-endian
|
||||
;; memory operation, and the low bytes in the little-endian case.
|
||||
;; (This is somewhat arbitrary but chosen to allow the most efficient
|
||||
;; sequences to compute the various atomic operations.)
|
||||
;; Note that $I8 accesses always use the big-endian case.
|
||||
(decl casloop_rotate_in (VecMInstBuilder Type MemFlags Reg Reg) Reg)
|
||||
(rule (casloop_rotate_in ib $I8 _ bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 0 bitshift))
|
||||
(rule (casloop_rotate_in ib $I16 (bigendian) bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 0 bitshift))
|
||||
(rule (casloop_rotate_in ib $I16 (littleendian) bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 16 bitshift))
|
||||
|
||||
;; The inverse operation: rotate values back to the original memory order.
|
||||
;; This can be done by simply using the negated shift count. As an extra
|
||||
;; optimization, we note that in the $I16 case the shift count can only
|
||||
;; take the values 0 or 16, both of which negate to themselves (mod 32),
|
||||
;; so the explicit negation operation can be omitted here.
|
||||
(decl casloop_rotate_out (VecMInstBuilder Type MemFlags Reg Reg) Reg)
|
||||
(rule (casloop_rotate_out ib $I8 _ bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 0 (neg_reg $I32 bitshift)))
|
||||
(rule (casloop_rotate_out ib $I16 (bigendian) bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 0 bitshift))
|
||||
(rule (casloop_rotate_out ib $I16 (littleendian) bitshift val)
|
||||
(push_rot_imm_reg ib $I32 (casloop_tmp_reg) val 16 bitshift))
|
||||
|
||||
;; Compute the previous memory value after a subword compare-and-swap loop.
|
||||
;; This is similar to casloop_rotate_in, but brings the value to the *low*
|
||||
;; bytes. This can be achieved simply by adding the type size to the rotate
|
||||
;; amount, which can be done within the same instruction. In the little-
|
||||
;; endian case, we also need to byte-swap the result. Since we only have
|
||||
;; a 32-bit byte-swap instruction, we load the value to the high bytes in
|
||||
;; this case before performing the 32-bit byte-swap.
|
||||
(decl casloop_rotate_result (Type MemFlags Reg Reg) Reg)
|
||||
(rule (casloop_rotate_result $I8 _ bitshift result)
|
||||
(rot_imm_reg $I32 result 8 bitshift))
|
||||
(rule (casloop_rotate_result $I16 (bigendian) bitshift result)
|
||||
(rot_imm_reg $I32 result 16 bitshift))
|
||||
(rule (casloop_rotate_result $I16 (littleendian) bitshift result)
|
||||
(bswap_reg $I32 (rot_reg $I32 result bitshift)))
|
||||
|
||||
;; Emit a subword compare-and-swap loop, returning the previous memory value.
|
||||
(decl casloop_subword (VecMInstBuilder Type MemFlags Reg Reg Reg) Reg)
|
||||
(rule (casloop_subword ib ty flags aligned_addr bitshift val)
|
||||
(casloop_rotate_result ty flags bitshift
|
||||
(casloop_emit ib ty flags aligned_addr val)))
|
||||
|
||||
|
||||
;; Helpers for generating `clz` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; Count leading zeroes. For a zero input, return the specified value.
|
||||
@@ -2497,6 +2761,9 @@
|
||||
(decl xor_mem (Type Reg MemArg) Reg)
|
||||
(rule (xor_mem ty x y) (alu_rx ty (aluop_xor ty) x y))
|
||||
|
||||
(decl push_xor_uimm32shifted (VecMInstBuilder Type WritableReg Reg UImm32Shifted) Reg)
|
||||
(rule (push_xor_uimm32shifted ib ty dst src imm)
|
||||
(push_alu_uimm32shifted ib (aluop_xor ty) dst src imm))
|
||||
|
||||
;; Helpers for generating `not` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
@@ -2508,6 +2775,13 @@
|
||||
(xor_uimm32shifted ty x (uimm32shifted 0xffffffff 0))
|
||||
(uimm32shifted 0xffffffff 32)))
|
||||
|
||||
(decl push_not_reg (VecMInstBuilder Type WritableReg Reg) Reg)
|
||||
(rule (push_not_reg ib (gpr32_ty ty) dst src)
|
||||
(push_xor_uimm32shifted ib ty dst src (uimm32shifted 0xffffffff 0)))
|
||||
(rule (push_not_reg ib (gpr64_ty ty) dst src)
|
||||
(let ((val Reg (push_xor_uimm32shifted ib ty dst src (uimm32shifted 0xffffffff 0))))
|
||||
(push_xor_uimm32shifted ib ty dst val (uimm32shifted 0xffffffff 32))))
|
||||
|
||||
|
||||
;; Helpers for generating `and_not` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
@@ -2573,6 +2847,19 @@
|
||||
(rule (neg_reg_sext32 ty x) (unary_rr ty (unaryop_neg_sext32 ty) x))
|
||||
|
||||
|
||||
;; Helpers for generating `bswap` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
(decl unaryop_bswap (Type) UnaryOp)
|
||||
(rule (unaryop_bswap $I32) (UnaryOp.BSwap32))
|
||||
(rule (unaryop_bswap $I64) (UnaryOp.BSwap64))
|
||||
|
||||
(decl bswap_reg (Type Reg) Reg)
|
||||
(rule (bswap_reg ty x) (unary_rr ty (unaryop_bswap ty) x))
|
||||
|
||||
(decl push_bswap_reg (VecMInstBuilder Type WritableReg Reg) Reg)
|
||||
(rule (push_bswap_reg ib ty dst src) (push_unary ib (unaryop_bswap ty) dst src))
|
||||
|
||||
|
||||
;; Helpers for generating `rot` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
(decl shiftop_rot (Type) ShiftOp)
|
||||
@@ -2587,6 +2874,14 @@
|
||||
(rule (rot_imm ty x shift_imm)
|
||||
(shift_rr ty (shiftop_rot ty) x shift_imm (zero_reg)))
|
||||
|
||||
(decl rot_imm_reg (Type Reg u8 Reg) Reg)
|
||||
(rule (rot_imm_reg ty x shift_imm shift_reg)
|
||||
(shift_rr ty (shiftop_rot ty) x shift_imm shift_reg))
|
||||
|
||||
(decl push_rot_imm_reg (VecMInstBuilder Type WritableReg Reg u8 Reg) Reg)
|
||||
(rule (push_rot_imm_reg ib ty dst src shift_imm shift_reg)
|
||||
(push_shift ib (shiftop_rot ty) dst src shift_imm shift_reg))
|
||||
|
||||
|
||||
;; Helpers for generating `lshl` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
@@ -2663,6 +2958,17 @@
|
||||
(rule (atomic_rmw_add $I64 src mem) (atomic_rmw_impl $I64 (ALUOp.Add64) src mem))
|
||||
|
||||
|
||||
;; Helpers for generating `atomic_cas` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
(decl atomic_cas_impl (Type Reg Reg MemArg) Reg)
|
||||
(rule (atomic_cas_impl $I32 src1 src2 mem) (atomic_cas32 src1 src2 mem))
|
||||
(rule (atomic_cas_impl $I64 src1 src2 mem) (atomic_cas64 src1 src2 mem))
|
||||
|
||||
(decl push_atomic_cas (VecMInstBuilder Type WritableReg Reg MemArg) Reg)
|
||||
(rule (push_atomic_cas ib $I32 src1 src2 mem) (push_atomic_cas32 ib src1 src2 mem))
|
||||
(rule (push_atomic_cas ib $I64 src1 src2 mem) (push_atomic_cas64 ib src1 src2 mem))
|
||||
|
||||
|
||||
;; Helpers for generating `fadd` instructions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
(decl fpuop2_add (Type) FPUOp2)
|
||||
|
||||
@@ -426,6 +426,28 @@ fn enc_rie_d(opcode: u16, r1: Reg, r3: Reg, i2: u16) -> [u8; 6] {
|
||||
enc
|
||||
}
|
||||
|
||||
/// RIEf-type instructions.
|
||||
///
|
||||
/// 47 39 35 31 23 15 7
|
||||
/// opcode1 r1 r2 i3 i4 i5 opcode2
|
||||
/// 40 36 32 24 16 8 0
|
||||
///
|
||||
fn enc_rie_f(opcode: u16, r1: Reg, r2: Reg, i3: u8, i4: u8, i5: u8) -> [u8; 6] {
|
||||
let mut enc: [u8; 6] = [0; 6];
|
||||
let opcode1 = ((opcode >> 8) & 0xff) as u8;
|
||||
let opcode2 = (opcode & 0xff) as u8;
|
||||
let r1 = machreg_to_gpr(r1) & 0x0f;
|
||||
let r2 = machreg_to_gpr(r2) & 0x0f;
|
||||
|
||||
enc[0] = opcode1;
|
||||
enc[1] = r1 << 4 | r2;
|
||||
enc[2] = i3;
|
||||
enc[3] = i4;
|
||||
enc[4] = i5;
|
||||
enc[5] = opcode2;
|
||||
enc
|
||||
}
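// Worked example (illustration only, not part of the patch; derived from the
// RISBGN test case added in emit_tests below):
//   Inst::RxSBG { op: Insert, rd: %r4, rn: %r5, start_bit: 8, end_bit: 32, rotate_amt: -16 }
// encodes via enc_rie_f(0xec59, %r4, %r5, 8, 32, (-16i8 as u8) & 63 == 48) as
//   enc[0] = 0xEC (opcode1), enc[1] = (4 << 4) | 5 = 0x45, enc[2] = 0x08,
//   enc[3] = 0x20, enc[4] = 0x30, enc[5] = 0x59 (opcode2)
// i.e. the byte sequence "EC4508203059".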
|
||||
|
||||
/// RIEg-type instructions.
|
||||
///
|
||||
/// 47 39 35 31 15 7
|
||||
@@ -1188,6 +1210,60 @@ impl MachInstEmit for Inst {
|
||||
);
|
||||
}
|
||||
|
||||
&Inst::RxSBG {
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
rotate_amt,
|
||||
} => {
|
||||
let opcode = match op {
|
||||
RxSBGOp::Insert => 0xec59, // RISBGN
|
||||
RxSBGOp::And => 0xec54, // RNSBG
|
||||
RxSBGOp::Or => 0xec56, // ROSBG
|
||||
RxSBGOp::Xor => 0xec57, // RXSBG
|
||||
};
|
||||
put(
|
||||
sink,
|
||||
&enc_rie_f(
|
||||
opcode,
|
||||
rd.to_reg(),
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
(rotate_amt as u8) & 63,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
&Inst::RxSBGTest {
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
rotate_amt,
|
||||
} => {
|
||||
let opcode = match op {
|
||||
RxSBGOp::And => 0xec54, // RNSBG
|
||||
RxSBGOp::Or => 0xec56, // ROSBG
|
||||
RxSBGOp::Xor => 0xec57, // RXSBG
|
||||
_ => unreachable!(),
|
||||
};
|
||||
put(
|
||||
sink,
|
||||
&enc_rie_f(
|
||||
opcode,
|
||||
rd,
|
||||
rn,
|
||||
start_bit | 0x80,
|
||||
end_bit,
|
||||
(rotate_amt as u8) & 63,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
&Inst::UnaryRR { op, rd, rn } => {
|
||||
match op {
|
||||
UnaryOp::Abs32 => {
|
||||
@@ -1222,6 +1298,14 @@ impl MachInstEmit for Inst {
|
||||
let opcode = 0xb9e1; // POPCNT
|
||||
put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 8, 0));
|
||||
}
|
||||
UnaryOp::BSwap32 => {
|
||||
let opcode = 0xb91f; // LRVR
|
||||
put(sink, &enc_rre(opcode, rd.to_reg(), rn));
|
||||
}
|
||||
UnaryOp::BSwap64 => {
|
||||
let opcode = 0xb90f; // LRVRG
|
||||
put(sink, &enc_rre(opcode, rd.to_reg(), rn));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1406,6 +1490,39 @@ impl MachInstEmit for Inst {
|
||||
state,
|
||||
);
|
||||
}
|
||||
&Inst::Loop { ref body, cond } => {
|
||||
// This sequence is *one* instruction in the vcode, and is expanded only here at
|
||||
// emission time, because it requires branching to internal labels.
|
||||
let loop_label = sink.get_label();
|
||||
let done_label = sink.get_label();
|
||||
|
||||
// Emit label at the start of the loop.
|
||||
sink.bind_label(loop_label);
|
||||
|
||||
for inst in (&body).into_iter() {
|
||||
match &inst {
|
||||
// Replace a CondBreak with a branch to done_label.
|
||||
&Inst::CondBreak { cond } => {
|
||||
let inst = Inst::OneWayCondBr {
|
||||
target: done_label,
|
||||
cond: *cond,
|
||||
};
|
||||
inst.emit(sink, emit_info, state);
|
||||
}
|
||||
_ => inst.emit(sink, emit_info, state),
|
||||
};
|
||||
}
|
||||
|
||||
let inst = Inst::OneWayCondBr {
|
||||
target: loop_label,
|
||||
cond,
|
||||
};
|
||||
inst.emit(sink, emit_info, state);
|
||||
|
||||
// Emit label at the end of the loop.
|
||||
sink.bind_label(done_label);
|
||||
}
|
||||
&Inst::CondBreak { .. } => unreachable!(), // Only valid inside a Loop.
|
||||
&Inst::AtomicCas32 { rd, rn, ref mem } | &Inst::AtomicCas64 { rd, rn, ref mem } => {
|
||||
let (opcode_rs, opcode_rsy) = match self {
|
||||
&Inst::AtomicCas32 { .. } => (Some(0xba), Some(0xeb14)), // CS(Y)
|
||||
|
||||
@@ -1478,6 +1478,24 @@ fn test_s390x_binemit() {
|
||||
"B9E1801A",
|
||||
"popcnt %r1, %r10, 8",
|
||||
));
|
||||
insns.push((
|
||||
Inst::UnaryRR {
|
||||
op: UnaryOp::BSwap32,
|
||||
rd: writable_gpr(1),
|
||||
rn: gpr(10),
|
||||
},
|
||||
"B91F001A",
|
||||
"lrvr %r1, %r10",
|
||||
));
|
||||
insns.push((
|
||||
Inst::UnaryRR {
|
||||
op: UnaryOp::BSwap64,
|
||||
rd: writable_gpr(1),
|
||||
rn: gpr(10),
|
||||
},
|
||||
"B90F001A",
|
||||
"lrvgr %r1, %r10",
|
||||
));
|
||||
|
||||
insns.push((
|
||||
Inst::CmpRR {
|
||||
@@ -2410,6 +2428,91 @@ fn test_s390x_binemit() {
|
||||
"srag %r4, %r5, 63(%r6)",
|
||||
));
|
||||
|
||||
insns.push((
|
||||
Inst::RxSBG {
|
||||
op: RxSBGOp::Insert,
|
||||
rd: writable_gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: -16,
|
||||
},
|
||||
"EC4508203059",
|
||||
"risbgn %r4, %r5, 8, 32, 48",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBG {
|
||||
op: RxSBGOp::And,
|
||||
rd: writable_gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4508203F54",
|
||||
"rnsbg %r4, %r5, 8, 32, 63",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBG {
|
||||
op: RxSBGOp::Or,
|
||||
rd: writable_gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4508203F56",
|
||||
"rosbg %r4, %r5, 8, 32, 63",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBG {
|
||||
op: RxSBGOp::Xor,
|
||||
rd: writable_gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4508203F57",
|
||||
"rxsbg %r4, %r5, 8, 32, 63",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBGTest {
|
||||
op: RxSBGOp::And,
|
||||
rd: gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4588203F54",
|
||||
"rnsbg %r4, %r5, 136, 32, 63",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBGTest {
|
||||
op: RxSBGOp::Or,
|
||||
rd: gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4588203F56",
|
||||
"rosbg %r4, %r5, 136, 32, 63",
|
||||
));
|
||||
insns.push((
|
||||
Inst::RxSBGTest {
|
||||
op: RxSBGOp::Xor,
|
||||
rd: gpr(4),
|
||||
rn: gpr(5),
|
||||
start_bit: 8,
|
||||
end_bit: 32,
|
||||
rotate_amt: 63,
|
||||
},
|
||||
"EC4588203F57",
|
||||
"rxsbg %r4, %r5, 136, 32, 63",
|
||||
));
|
||||
|
||||
insns.push((
|
||||
Inst::AtomicRmw {
|
||||
alu_op: ALUOp::Add32,
|
||||
@@ -6699,6 +6802,34 @@ fn test_s390x_binemit() {
|
||||
"jno 6 ; trap",
|
||||
));
|
||||
|
||||
insns.push((
|
||||
Inst::Loop {
|
||||
body: vec![
|
||||
Inst::CmpRR {
|
||||
op: CmpOp::CmpS32,
|
||||
rn: gpr(2),
|
||||
rm: gpr(3),
|
||||
},
|
||||
Inst::CondBreak {
|
||||
cond: Cond::from_mask(13),
|
||||
},
|
||||
Inst::AtomicCas32 {
|
||||
rd: writable_gpr(4),
|
||||
rn: gpr(5),
|
||||
mem: MemArg::BXD12 {
|
||||
base: gpr(6),
|
||||
index: zero_reg(),
|
||||
disp: UImm12::maybe_from_u64(0).unwrap(),
|
||||
flags: MemFlags::trusted(),
|
||||
},
|
||||
},
|
||||
],
|
||||
cond: Cond::from_mask(6),
|
||||
},
|
||||
"1923C0D400000008BA456000C064FFFFFFFA",
|
||||
"0: cr %r2, %r3 ; jgnh 1f ; cs %r4, %r5, 0(%r6) ; jglh 0b ; 1:",
|
||||
));
|
||||
|
||||
insns.push((
|
||||
Inst::FpuMove32 {
|
||||
rd: writable_fpr(8),
|
||||
|
||||
@@ -35,7 +35,7 @@ mod emit_tests;
|
||||
|
||||
pub use crate::isa::s390x::lower::isle::generated_code::{
|
||||
ALUOp, CmpOp, FPUOp1, FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst,
|
||||
ShiftOp, UnaryOp,
|
||||
RxSBGOp, ShiftOp, UnaryOp,
|
||||
};
|
||||
|
||||
/// Additional information for (direct) Call instructions, left out of line to lower the size of
|
||||
@@ -93,6 +93,8 @@ impl Inst {
|
||||
| Inst::AluRUImm16Shifted { .. }
|
||||
| Inst::AluRUImm32Shifted { .. }
|
||||
| Inst::ShiftRR { .. }
|
||||
| Inst::RxSBG { .. }
|
||||
| Inst::RxSBGTest { .. }
|
||||
| Inst::SMulWide { .. }
|
||||
| Inst::UMulWide { .. }
|
||||
| Inst::SDivMod32 { .. }
|
||||
@@ -191,6 +193,8 @@ impl Inst {
|
||||
| Inst::JTSequence { .. }
|
||||
| Inst::LoadExtNameFar { .. }
|
||||
| Inst::LoadAddr { .. }
|
||||
| Inst::Loop { .. }
|
||||
| Inst::CondBreak { .. }
|
||||
| Inst::VirtualSPOffsetAdj { .. }
|
||||
| Inst::ValueLabelMarker { .. }
|
||||
| Inst::Unwind { .. } => InstructionSet::Base,
|
||||
@@ -437,6 +441,14 @@ fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
|
||||
collector.add_use(shift_reg);
|
||||
}
|
||||
}
|
||||
&Inst::RxSBG { rd, rn, .. } => {
|
||||
collector.add_mod(rd);
|
||||
collector.add_use(rn);
|
||||
}
|
||||
&Inst::RxSBGTest { rd, rn, .. } => {
|
||||
collector.add_use(rd);
|
||||
collector.add_use(rn);
|
||||
}
|
||||
&Inst::UnaryRR { rd, rn, .. } => {
|
||||
collector.add_def(rd);
|
||||
collector.add_use(rn);
|
||||
@@ -687,6 +699,12 @@ fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
|
||||
collector.add_def(rd);
|
||||
memarg_regs(mem, collector);
|
||||
}
|
||||
&Inst::Loop { ref body, .. } => {
|
||||
for inst in body.iter() {
|
||||
s390x_get_regs(inst, collector);
|
||||
}
|
||||
}
|
||||
&Inst::CondBreak { .. } => {}
|
||||
&Inst::VirtualSPOffsetAdj { .. } => {}
|
||||
&Inst::ValueLabelMarker { reg, .. } => {
|
||||
collector.add_use(reg);
|
||||
@@ -812,6 +830,22 @@ pub fn s390x_map_regs<RM: RegMapper>(inst: &mut Inst, mapper: &RM) {
|
||||
mapper.map_use(shift_reg);
|
||||
}
|
||||
}
|
||||
&mut Inst::RxSBG {
|
||||
ref mut rd,
|
||||
ref mut rn,
|
||||
..
|
||||
} => {
|
||||
mapper.map_mod(rd);
|
||||
mapper.map_use(rn);
|
||||
}
|
||||
&mut Inst::RxSBGTest {
|
||||
ref mut rd,
|
||||
ref mut rn,
|
||||
..
|
||||
} => {
|
||||
mapper.map_use(rd);
|
||||
mapper.map_use(rn);
|
||||
}
|
||||
&mut Inst::UnaryRR {
|
||||
ref mut rd,
|
||||
ref mut rn,
|
||||
@@ -1408,6 +1442,12 @@ pub fn s390x_map_regs<RM: RegMapper>(inst: &mut Inst, mapper: &RM) {
|
||||
mapper.map_def(rd);
|
||||
map_mem(mapper, mem);
|
||||
}
|
||||
&mut Inst::Loop { ref mut body, .. } => {
|
||||
for inst in body.iter_mut() {
|
||||
s390x_map_regs(inst, mapper);
|
||||
}
|
||||
}
|
||||
&mut Inst::CondBreak { .. } => {}
|
||||
&mut Inst::VirtualSPOffsetAdj { .. } => {}
|
||||
&mut Inst::ValueLabelMarker { ref mut reg, .. } => {
|
||||
mapper.map_use(reg);
|
||||
@@ -1909,6 +1949,58 @@ impl Inst {
|
||||
};
|
||||
format!("{} {}, {}, {}{}", op, rd, rn, shift_imm, shift_reg)
|
||||
}
|
||||
&Inst::RxSBG {
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
rotate_amt,
|
||||
} => {
|
||||
let op = match op {
|
||||
RxSBGOp::Insert => "risbgn",
|
||||
RxSBGOp::And => "rnsbg",
|
||||
RxSBGOp::Or => "rosbg",
|
||||
RxSBGOp::Xor => "rxsbg",
|
||||
};
|
||||
let rd = rd.to_reg().show_rru(mb_rru);
|
||||
let rn = rn.show_rru(mb_rru);
|
||||
format!(
|
||||
"{} {}, {}, {}, {}, {}",
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
(rotate_amt as u8) & 63
|
||||
)
|
||||
}
|
||||
&Inst::RxSBGTest {
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit,
|
||||
end_bit,
|
||||
rotate_amt,
|
||||
} => {
|
||||
let op = match op {
|
||||
RxSBGOp::And => "rnsbg",
|
||||
RxSBGOp::Or => "rosbg",
|
||||
RxSBGOp::Xor => "rxsbg",
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let rd = rd.show_rru(mb_rru);
|
||||
let rn = rn.show_rru(mb_rru);
|
||||
format!(
|
||||
"{} {}, {}, {}, {}, {}",
|
||||
op,
|
||||
rd,
|
||||
rn,
|
||||
start_bit | 0x80,
|
||||
end_bit,
|
||||
(rotate_amt as u8) & 63
|
||||
)
|
||||
}
|
||||
&Inst::UnaryRR { op, rd, rn } => {
|
||||
let (op, extra) = match op {
|
||||
UnaryOp::Abs32 => ("lpr", ""),
|
||||
@@ -1919,6 +2011,8 @@ impl Inst {
|
||||
UnaryOp::Neg64Ext32 => ("lcgfr", ""),
|
||||
UnaryOp::PopcntByte => ("popcnt", ""),
|
||||
UnaryOp::PopcntReg => ("popcnt", ", 8"),
|
||||
UnaryOp::BSwap32 => ("lrvr", ""),
|
||||
UnaryOp::BSwap64 => ("lrvgr", ""),
|
||||
};
|
||||
let rd = rd.to_reg().show_rru(mb_rru);
|
||||
let rn = rn.show_rru(mb_rru);
|
||||
@@ -2644,6 +2738,19 @@ impl Inst {
|
||||
let mem = mem.show_rru(mb_rru);
|
||||
format!("{}{} {}, {}", mem_str, op, rd, mem)
|
||||
}
|
||||
&Inst::Loop { ref body, cond } => {
|
||||
let body = body
|
||||
.into_iter()
|
||||
.map(|inst| inst.show_rru(mb_rru))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ; ");
|
||||
let cond = cond.show_rru(mb_rru);
|
||||
format!("0: {} ; jg{} 0b ; 1:", body, cond)
|
||||
}
|
||||
&Inst::CondBreak { cond } => {
|
||||
let cond = cond.show_rru(mb_rru);
|
||||
format!("jg{} 1f", cond)
|
||||
}
|
||||
&Inst::VirtualSPOffsetAdj { offset } => {
|
||||
state.virtual_sp_offset += offset;
|
||||
format!("virtual_sp_offset_adjust {}", offset)
|
||||
|
||||
@@ -1497,24 +1497,44 @@
|
||||
|
||||
;;;; Rules for `atomic_rmw` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; Atomic operations that do not require a compare-and-swap loop.
|
||||
|
||||
;; Atomic AND for 32/64-bit big-endian types, using a single instruction.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (bigendian) (AtomicRmwOp.And) addr src)))
|
||||
(value_reg (atomic_rmw_and ty (put_in_reg src)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
|
||||
;; Atomic AND for 32/64-bit little-endian types, using byte-swapped input/output.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (littleendian) (AtomicRmwOp.And) addr src)))
|
||||
(value_reg (bswap_reg ty (atomic_rmw_and ty (bswap_reg ty (put_in_reg src))
|
||||
(lower_address flags addr (zero_offset))))))
|
||||
|
||||
;; Atomic OR for 32/64-bit big-endian types, using a single instruction.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (bigendian) (AtomicRmwOp.Or) addr src)))
|
||||
(value_reg (atomic_rmw_or ty (put_in_reg src)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
|
||||
;; Atomic OR for 32/64-bit little-endian types, using byte-swapped input/output.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (littleendian) (AtomicRmwOp.Or) addr src)))
|
||||
(value_reg (bswap_reg ty (atomic_rmw_or ty (bswap_reg ty (put_in_reg src))
|
||||
(lower_address flags addr (zero_offset))))))
|
||||
|
||||
;; Atomic XOR for 32/64-bit big-endian types, using a single instruction.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (bigendian) (AtomicRmwOp.Xor) addr src)))
|
||||
(value_reg (atomic_rmw_xor ty (put_in_reg src)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
|
||||
;; Atomic XOR for 32/64-bit little-endian types, using byte-swapped input/output.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (littleendian) (AtomicRmwOp.Xor) addr src)))
|
||||
(value_reg (bswap_reg ty (atomic_rmw_xor ty (bswap_reg ty (put_in_reg src))
|
||||
(lower_address flags addr (zero_offset))))))
|
||||
|
||||
;; Atomic ADD for 32/64-bit big-endian types, using a single instruction.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_rmw flags @ (bigendian) (AtomicRmwOp.Add) addr src)))
|
||||
@@ -1528,17 +1548,278 @@
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
|
||||
|
||||
;; Atomic operations that require a compare-and-swap loop.
|
||||
|
||||
;; Operations for 32/64-bit types can use a fullword compare-and-swap loop.
|
||||
(rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags op addr src)))
|
||||
(let ((src_reg Reg (put_in_reg src))
|
||||
(addr_reg Reg (put_in_reg addr))
|
||||
;; Create body of compare-and-swap loop.
|
||||
(ib VecMInstBuilder (inst_builder_new))
|
||||
(val0 Reg (writable_reg_to_reg (casloop_val_reg)))
|
||||
(val1 Reg (atomic_rmw_body ib ty flags op
|
||||
(casloop_tmp_reg) val0 src_reg)))
|
||||
;; Emit compare-and-swap loop and extract final result.
|
||||
(value_reg (casloop ib ty flags addr_reg val1))))
|
||||
|
||||
;; Operations for 8/16-bit types must operate on the surrounding aligned word.
|
||||
(rule (lower (has_type (ty_8_or_16 ty) (atomic_rmw flags op addr src)))
|
||||
(let ((src_reg Reg (put_in_reg src))
|
||||
(addr_reg Reg (put_in_reg addr))
|
||||
;; Prepare access to surrounding aligned word.
|
||||
(bitshift Reg (casloop_bitshift addr_reg))
|
||||
(aligned_addr Reg (casloop_aligned_addr addr_reg))
|
||||
;; Create body of compare-and-swap loop.
|
||||
(ib VecMInstBuilder (inst_builder_new))
|
||||
(val0 Reg (writable_reg_to_reg (casloop_val_reg)))
|
||||
(val1 Reg (casloop_rotate_in ib ty flags bitshift val0))
|
||||
(val2 Reg (atomic_rmw_body ib ty flags op
|
||||
(casloop_tmp_reg) val1 src_reg))
|
||||
(val3 Reg (casloop_rotate_out ib ty flags bitshift val2)))
|
||||
;; Emit compare-and-swap loop and extract final result.
|
||||
(value_reg (casloop_subword ib ty flags aligned_addr bitshift val3))))
|
||||
|
||||
;; Loop bodies for atomic read-modify-write operations.
|
||||
(decl atomic_rmw_body (VecMInstBuilder Type MemFlags AtomicRmwOp
|
||||
WritableReg Reg Reg) Reg)
|
||||
|
||||
;; Loop bodies for 32-/64-bit atomic XCHG operations.
|
||||
;; Simply use the source (possibly byte-swapped) as new target value.
|
||||
(rule (atomic_rmw_body ib (ty_32_or_64 ty) (bigendian)
|
||||
(AtomicRmwOp.Xchg) tmp val src)
|
||||
src)
|
||||
(rule (atomic_rmw_body ib (ty_32_or_64 ty) (littleendian)
|
||||
(AtomicRmwOp.Xchg) tmp val src)
|
||||
(bswap_reg ty src))
|
||||
|
||||
;; Loop bodies for 32-/64-bit atomic NAND operations.
|
||||
;; On z15 this can use the NN(G)RK instruction. On z14, perform an And
|
||||
;; operation and invert the result. In the little-endian case, we can
|
||||
;; simply byte-swap the source operand.
|
||||
(rule (atomic_rmw_body ib (and (mie2_enabled) (ty_32_or_64 ty)) (bigendian)
|
||||
(AtomicRmwOp.Nand) tmp val src)
|
||||
(push_alu_reg ib (aluop_and_not ty) tmp val src))
|
||||
(rule (atomic_rmw_body ib (and (mie2_enabled) (ty_32_or_64 ty)) (littleendian)
|
||||
(AtomicRmwOp.Nand) tmp val src)
|
||||
(push_alu_reg ib (aluop_and_not ty) tmp val (bswap_reg ty src)))
|
||||
(rule (atomic_rmw_body ib (and (mie2_disabled) (ty_32_or_64 ty)) (bigendian)
|
||||
(AtomicRmwOp.Nand) tmp val src)
|
||||
(push_not_reg ib ty tmp
|
||||
(push_alu_reg ib (aluop_and ty) tmp val src)))
|
||||
(rule (atomic_rmw_body ib (and (mie2_disabled) (ty_32_or_64 ty)) (littleendian)
|
||||
(AtomicRmwOp.Nand) tmp val src)
|
||||
(push_not_reg ib ty tmp
|
||||
(push_alu_reg ib (aluop_and ty) tmp val (bswap_reg ty src))))
|
||||
|
||||
;; Loop bodies for 8-/16-bit atomic bit operations.
|
||||
;; These use the "rotate-then-<op>-selected bits" family of instructions.
|
||||
;; For the Nand operation, we again perform And and invert the result.
|
||||
(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Xchg) tmp val src)
|
||||
(atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Insert) tmp val src))
|
||||
(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.And) tmp val src)
|
||||
(atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.And) tmp val src))
|
||||
(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Or) tmp val src)
|
||||
(atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Or) tmp val src))
|
||||
(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Xor) tmp val src)
|
||||
(atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Xor) tmp val src))
|
||||
(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Nand) tmp val src)
|
||||
(atomic_rmw_body_invert ib ty flags tmp
|
||||
(atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.And) tmp val src)))
|
||||
|
||||
;; RxSBG subword operation.
|
||||
(decl atomic_rmw_body_rxsbg (VecMInstBuilder Type MemFlags RxSBGOp
|
||||
WritableReg Reg Reg) Reg)
|
||||
;; 8-bit case: use the low byte of "src" and the high byte of "val".
|
||||
(rule (atomic_rmw_body_rxsbg ib $I8 _ op tmp val src)
|
||||
(push_rxsbg ib op tmp val src 32 40 24))
|
||||
;; 16-bit big-endian case: use the low two bytes of "src" and the
|
||||
;; high two bytes of "val".
|
||||
(rule (atomic_rmw_body_rxsbg ib $I16 (bigendian) op tmp val src)
|
||||
(push_rxsbg ib op tmp val src 32 48 16))
|
||||
;; 16-bit little-endian case: use the low two bytes of "src", byte-swapped
|
||||
;; so they end up in the high two bytes, and the low two bytes of "val".
|
||||
(rule (atomic_rmw_body_rxsbg ib $I16 (littleendian) op tmp val src)
|
||||
(push_rxsbg ib op tmp val (bswap_reg $I32 src) 48 64 -16))
|
||||
|
||||
;; Invert a subword.
|
||||
(decl atomic_rmw_body_invert (VecMInstBuilder Type MemFlags WritableReg Reg) Reg)
|
||||
;; 8-bit case: invert the high byte.
|
||||
(rule (atomic_rmw_body_invert ib $I8 _ tmp val)
|
||||
(push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xff000000 0)))
|
||||
;; 16-bit big-endian case: invert the two high bytes.
|
||||
(rule (atomic_rmw_body_invert ib $I16 (bigendian) tmp val)
|
||||
(push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xffff0000 0)))
|
||||
;; 16-bit little-endian case: invert the two low bytes.
|
||||
(rule (atomic_rmw_body_invert ib $I16 (littleendian) tmp val)
|
||||
(push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xffff 0)))
|
||||
|
||||
;; Loop bodies for atomic ADD/SUB operations.
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Add) tmp val src)
|
||||
(atomic_rmw_body_addsub ib ty flags (aluop_add (ty_ext32 ty)) tmp val src))
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Sub) tmp val src)
|
||||
(atomic_rmw_body_addsub ib ty flags (aluop_sub (ty_ext32 ty)) tmp val src))
|
||||
|
||||
;; Addition or subtraction operation.
|
||||
(decl atomic_rmw_body_addsub (VecMInstBuilder Type MemFlags ALUOp
|
||||
WritableReg Reg Reg) Reg)
|
||||
;; 32/64-bit big-endian case: just a regular add/sub operation.
|
||||
(rule (atomic_rmw_body_addsub ib (ty_32_or_64 ty) (bigendian) op tmp val src)
|
||||
(push_alu_reg ib op tmp val src))
|
||||
;; 32/64-bit little-endian case: byte-swap the value loaded from memory before
|
||||
;; and after performing the operation in native endianness.
|
||||
(rule (atomic_rmw_body_addsub ib (ty_32_or_64 ty) (littleendian) op tmp val src)
|
||||
(let ((val_swapped Reg (push_bswap_reg ib ty tmp val))
|
||||
(res_swapped Reg (push_alu_reg ib op tmp val_swapped src)))
|
||||
(push_bswap_reg ib ty tmp res_swapped)))
|
||||
;; 8-bit case: perform a 32-bit addition of the source value shifted by 24 bits
|
||||
;; to the memory value, which contains the target in its high byte.
|
||||
(rule (atomic_rmw_body_addsub ib $I8 _ op tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 24)))
|
||||
(push_alu_reg ib op tmp val src_shifted)))
|
||||
;; 16-bit big-endian case: similar, just shift the source by 16 bits.
|
||||
(rule (atomic_rmw_body_addsub ib $I16 (bigendian) op tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 16)))
|
||||
(push_alu_reg ib op tmp val src_shifted)))
|
||||
;; 16-bit little-endian case: the same, but in addition we need to byte-swap
|
||||
;; the memory value before and after the operation. Since the value was placed
|
||||
;; in the low two bytes by our standard rotation, we can use a 32-bit byte-swap
|
||||
;; and the native-endian value will end up in the high bytes where we need it
|
||||
;; to perform the operation.
|
||||
(rule (atomic_rmw_body_addsub ib $I16 (littleendian) op tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 16))
|
||||
(val_swapped Reg (push_bswap_reg ib $I32 tmp val))
|
||||
(res_swapped Reg (push_alu_reg ib op tmp val_swapped src_shifted)))
|
||||
(push_bswap_reg ib $I32 tmp res_swapped)))
|
||||
|
||||
;; Loop bodies for atomic MIN/MAX operations.
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Smin) tmp val src)
|
||||
(atomic_rmw_body_minmax ib ty flags (cmpop_cmps (ty_ext32 ty))
|
||||
(intcc_as_cond (IntCC.SignedLessThan)) tmp val src))
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Smax) tmp val src)
|
||||
(atomic_rmw_body_minmax ib ty flags (cmpop_cmps (ty_ext32 ty))
|
||||
(intcc_as_cond (IntCC.SignedGreaterThan)) tmp val src))
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Umin) tmp val src)
|
||||
(atomic_rmw_body_minmax ib ty flags (cmpop_cmpu (ty_ext32 ty))
|
||||
(intcc_as_cond (IntCC.UnsignedLessThan)) tmp val src))
|
||||
(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Umax) tmp val src)
|
||||
(atomic_rmw_body_minmax ib ty flags (cmpop_cmpu (ty_ext32 ty))
|
||||
(intcc_as_cond (IntCC.UnsignedGreaterThan)) tmp val src))
|
||||
|
||||
;; Minimum or maximum operation.
|
||||
(decl atomic_rmw_body_minmax (VecMInstBuilder Type MemFlags CmpOp Cond
|
||||
WritableReg Reg Reg) Reg)
|
||||
;; 32/64-bit big-endian case: just a comparison followed by a conditional
|
||||
;; break out of the loop if the memory value does not need to change.
|
||||
;; If it does need to change, the new value is simply the source operand.
|
||||
(rule (atomic_rmw_body_minmax ib (ty_32_or_64 ty) (bigendian)
|
||||
op cond tmp val src)
|
||||
(let ((_ Reg (push_break_if ib (cmp_rr op src val) (invert_cond cond))))
|
||||
src))
|
||||
;; 32/64-bit little-endian case: similar, but we need to byte-swap the
|
||||
;; memory value before the comparison. If we need to store the new value,
|
||||
;; it also needs to be byte-swapped.
|
||||
(rule (atomic_rmw_body_minmax ib (ty_32_or_64 ty) (littleendian)
|
||||
op cond tmp val src)
|
||||
(let ((val_swapped Reg (push_bswap_reg ib ty tmp val))
|
||||
(_ Reg (push_break_if ib (cmp_rr op src val_swapped)
|
||||
(invert_cond cond))))
|
||||
(push_bswap_reg ib ty tmp src)))
|
||||
;; 8-bit case: compare the memory value (which contains the target in the
|
||||
;; high byte) with the source operand shifted by 24 bits. Note that in
|
||||
;; the case where the high bytes are equal, the comparison may succeed
|
||||
;; or fail depending on the unrelated low bits of the memory value, and
|
||||
;; so we either may or may not perform the update. But it would be an
|
||||
;; update with the same value in any case, so this does not matter.
|
||||
(rule (atomic_rmw_body_minmax ib $I8 _ op cond tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 24))
|
||||
(_ Reg (push_break_if ib (cmp_rr op src_shifted val)
|
||||
(invert_cond cond))))
|
||||
(push_rxsbg ib (RxSBGOp.Insert) tmp val src_shifted 32 40 0)))
|
||||
;; 16-bit big-endian case: similar, just shift the source by 16 bits.
|
||||
(rule (atomic_rmw_body_minmax ib $I16 (bigendian) op cond tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 16))
|
||||
(_ Reg (push_break_if ib (cmp_rr op src_shifted val)
|
||||
(invert_cond cond))))
|
||||
(push_rxsbg ib (RxSBGOp.Insert) tmp val src_shifted 32 48 0)))
|
||||
;; 16-bit little-endian case: similar, but in addition byte-swap the
|
||||
;; memory value before and after the operation, like for _addsub_.
|
||||
(rule (atomic_rmw_body_minmax ib $I16 (littleendian) op cond tmp val src)
|
||||
(let ((src_shifted Reg (lshl_imm $I32 src 16))
|
||||
(val_swapped Reg (push_bswap_reg ib $I32 tmp val))
|
||||
(_ Reg (push_break_if ib (cmp_rr op src_shifted val_swapped)
|
||||
(invert_cond cond)))
|
||||
(res_swapped Reg (push_rxsbg ib (RxSBGOp.Insert)
|
||||
tmp val_swapped src_shifted 32 48 0)))
|
||||
(push_bswap_reg ib $I32 tmp res_swapped)))
|
||||
|
||||
|
||||
;;;; Rules for `atomic_cas` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
;; 32-bit big-endian atomic compare-and-swap instruction.
|
||||
(rule (lower (has_type $I32 (atomic_cas flags @ (bigendian) addr old new)))
|
||||
(value_reg (atomic_cas32 (put_in_reg old) (put_in_reg new)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
;; 32/64-bit big-endian atomic compare-and-swap instruction.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_cas flags @ (bigendian) addr src1 src2)))
|
||||
(value_reg (atomic_cas_impl ty (put_in_reg src1) (put_in_reg src2)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
|
||||
;; 64-bit big-endian atomic compare-and-swap instruction.
|
||||
(rule (lower (has_type $I64 (atomic_cas flags @ (bigendian) addr old new)))
|
||||
(value_reg (atomic_cas64 (put_in_reg old) (put_in_reg new)
|
||||
(lower_address flags addr (zero_offset)))))
|
||||
;; 32/64-bit little-endian atomic compare-and-swap instruction.
|
||||
;; Implemented by byte-swapping old/new inputs and the output.
|
||||
(rule (lower (has_type (ty_32_or_64 ty)
|
||||
(atomic_cas flags @ (littleendian) addr src1 src2)))
|
||||
(value_reg (bswap_reg ty (atomic_cas_impl ty (bswap_reg ty (put_in_reg src1))
|
||||
(bswap_reg ty (put_in_reg src2))
|
||||
(lower_address flags addr (zero_offset))))))
|
||||
|
||||
;; 8/16-bit atomic compare-and-swap implemented via loop.
|
||||
(rule (lower (has_type (ty_8_or_16 ty) (atomic_cas flags addr src1 src2)))
|
||||
(let ((src1_reg Reg (put_in_reg src1))
|
||||
(src2_reg Reg (put_in_reg src2))
|
||||
(addr_reg Reg (put_in_reg addr))
|
||||
;; Prepare access to the surrounding aligned word.
|
||||
(bitshift Reg (casloop_bitshift addr_reg))
|
||||
(aligned_addr Reg (casloop_aligned_addr addr_reg))
|
||||
;; Create body of compare-and-swap loop.
|
||||
(ib VecMInstBuilder (inst_builder_new))
|
||||
(val0 Reg (writable_reg_to_reg (casloop_val_reg)))
|
||||
(val1 Reg (casloop_rotate_in ib ty flags bitshift val0))
|
||||
(val2 Reg (atomic_cas_body ib ty flags
|
||||
(casloop_tmp_reg) val1 src1_reg src2_reg))
|
||||
(val3 Reg (casloop_rotate_out ib ty flags bitshift val2)))
|
||||
;; Emit compare-and-swap loop and extract final result.
|
||||
(value_reg (casloop_subword ib ty flags aligned_addr bitshift val3))))
|
||||
|
||||
;; Emit loop body instructions to perform a subword compare-and-swap.
|
||||
(decl atomic_cas_body (VecMInstBuilder Type MemFlags
|
||||
WritableReg Reg Reg Reg) Reg)
|
||||
|
||||
;; 8-bit case: "val" contains the value loaded from memory in the high byte.
|
||||
;; Compare with the comparison value in the low byte of "src1". If unequal,
|
||||
;; break out of the loop, otherwise replace the target byte in "val" with
|
||||
;; the low byte of "src2".
|
||||
(rule (atomic_cas_body ib $I8 _ tmp val src1 src2)
|
||||
(let ((_ Reg (push_break_if ib (rxsbg_test (RxSBGOp.Xor) val src1 32 40 24)
|
||||
(intcc_as_cond (IntCC.NotEqual)))))
|
||||
(push_rxsbg ib (RxSBGOp.Insert) tmp val src2 32 40 24)))
|
||||
|
||||
;; 16-bit big-endian case: Same as above, except with values in the high
|
||||
;; two bytes of "val" and low two bytes of "src1" and "src2".
|
||||
(rule (atomic_cas_body ib $I16 (bigendian) tmp val src1 src2)
|
||||
(let ((_ Reg (push_break_if ib (rxsbg_test (RxSBGOp.Xor) val src1 32 48 16)
|
||||
(intcc_as_cond (IntCC.NotEqual)))))
|
||||
(push_rxsbg ib (RxSBGOp.Insert) tmp val src2 32 48 16)))
|
||||
|
||||
;; 16-bit little-endian case: "val" here contains a little-endian value in the
|
||||
;; *low* two bytes. "src1" and "src2" contain native (i.e. big-endian) values
|
||||
;; in their low two bytes. Perform the operation in little-endian mode by
|
||||
;; byte-swapping "src1" and "src" ahead of the loop. Note that this is a
|
||||
;; 32-bit operation so the little-endian 16-bit values end up in the *high*
|
||||
;; two bytes of the swapped values.
|
||||
(rule (atomic_cas_body ib $I16 (littleendian) tmp val src1 src2)
|
||||
(let ((src1_swapped Reg (bswap_reg $I32 src1))
|
||||
(src2_swapped Reg (bswap_reg $I32 src2))
|
||||
(_ Reg (push_break_if ib
|
||||
(rxsbg_test (RxSBGOp.Xor) val src1_swapped 48 64 -16)
|
||||
(intcc_as_cond (IntCC.NotEqual)))))
|
||||
(push_rxsbg ib (RxSBGOp.Insert) tmp val src2_swapped 48 64 -16)))
|
||||
|
||||
|
||||
;;;; Rules for `atomic_load` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
@@ -21,6 +21,7 @@ use crate::{
|
||||
machinst::{InsnOutput, LowerCtx, RelocDistance},
|
||||
};
|
||||
use std::boxed::Box;
|
||||
use std::cell::Cell;
|
||||
use std::convert::TryFrom;
|
||||
use std::vec::Vec;
|
||||
|
||||
@@ -28,6 +29,8 @@ type BoxCallInfo = Box<CallInfo>;
|
||||
type BoxCallIndInfo = Box<CallIndInfo>;
|
||||
type VecMachLabel = Vec<MachLabel>;
|
||||
type BoxExternalName = Box<ExternalName>;
|
||||
type VecMInst = Vec<MInst>;
|
||||
type VecMInstBuilder = Cell<Vec<MInst>>;
|
||||
|
||||
/// The main entry point for lowering with ISLE.
|
||||
pub(crate) fn lower<C>(
|
||||
@@ -485,6 +488,41 @@ where
|
||||
self.lower_ctx.abi().stackslot_addr(stack_slot, offset, dst)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn inst_builder_new(&mut self) -> VecMInstBuilder {
|
||||
Cell::new(Vec::<MInst>::new())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
|
||||
let mut vec = builder.take();
|
||||
vec.push(inst.clone());
|
||||
builder.set(vec);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
|
||||
builder.take()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
|
||||
if reg.to_reg().is_real() {
|
||||
Some(reg)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn same_reg(&mut self, src: Reg, dst: WritableReg) -> Option<()> {
|
||||
if dst.to_reg() == src {
|
||||
Some(())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
|
||||
let input = self.lower_ctx.get_value_as_source_or_const(val);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
src/clif.isle 9ea75a6f790b5c03
|
||||
src/prelude.isle 6aaf8ce0f5a5c2ec
|
||||
src/isa/s390x/inst.isle f5af3708848ef1aa
|
||||
src/isa/s390x/lower.isle 57dcc39cbab2d1c6
|
||||
src/isa/s390x/inst.isle 1ae3c0f9c956affd
|
||||
src/isa/s390x/lower.isle d18ee0bff12cad4e
|
||||
|
||||
cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs (generated, 3040 lines; file diff suppressed because it is too large)
@@ -0,0 +1,63 @@
|
||||
test compile
|
||||
target s390x
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_CAS
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_cas_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_cas.i64 little v2, v0, v1
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r2
|
||||
; nextln: lrvgr %r3, %r3
|
||||
; nextln: csg %r2, %r3, 0(%r4)
|
||||
; nextln: lrvgr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_cas_i32(i32, i32, i64) -> i32 {
|
||||
block0(v0: i32, v1: i32, v2: i64):
|
||||
v3 = atomic_cas.i32 little v2, v0, v1
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r2
|
||||
; nextln: lrvr %r3, %r3
|
||||
; nextln: cs %r2, %r3, 0(%r4)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_cas_i16(i64, i16, i16, i64) -> i16 {
|
||||
block0(v0: i64, v1: i16, v2: i16, v3: i64):
|
||||
v4 = atomic_cas.i16 little v3, v1, v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r5, 3
|
||||
; nextln: nill %r5, 65532
|
||||
; nextln: lrvr %r3, %r3
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r5)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r3, 176, 64, 48 ; jglh 1f ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_cas_i8(i64, i8, i8, i64) -> i8 {
|
||||
block0(v0: i64, v1: i8, v2: i8, v3: i64):
|
||||
v4 = atomic_cas.i8 little v3, v1, v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: stmg %r14, %r15, 112(%r15)
|
||||
; nextln: sllk %r2, %r5, 3
|
||||
; nextln: nill %r5, 65532
|
||||
; nextln: lcr %r14, %r2
|
||||
; nextln: l %r0, 0(%r5)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: lmg %r14, %r15, 112(%r15)
|
||||
; nextln: br %r14
|
||||
|
||||
@@ -23,3 +23,32 @@ block0(v0: i32, v1: i32, v2: i64):
|
||||
; check: cs %r2, %r3, 0(%r4)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_cas_i16(i64, i16, i16, i64) -> i16 {
|
||||
block0(v0: i64, v1: i16, v2: i16, v3: i64):
|
||||
v4 = atomic_cas.i16 v3, v1, v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r5, 3
|
||||
; nextln: nill %r5, 65532
|
||||
; nextln: l %r0, 0(%r5)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 48, 16 ; jglh 1f ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
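The `sllk`/`nill` pair at the top of every subword sequence splits the byte address into the containing aligned 32-bit word and a bit position within it (65532 is 0xFFFC, which clears the low two address bits). A small sketch of that address split follows; the helper name is assumed for illustration.

```rust
// Sketch: split a byte address into (aligned word address, bit offset in word),
// mirroring the "sllk %rX, addr, 3" and "nill addr, 65532" setup before the loop.
fn split_subword_address(addr: u64) -> (u64, u32) {
    let aligned = addr & !3;                  // word that holds the byte/halfword
    let bit_shift = ((addr & 3) * 8) as u32;  // its position inside that word
    (aligned, bit_shift)
}

fn main() {
    assert_eq!(split_subword_address(0x1002), (0x1000, 16));
    assert_eq!(split_subword_address(0x1003), (0x1000, 24));
}
```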
|
||||
|
||||
function %atomic_cas_i8(i64, i8, i8, i64) -> i8 {
|
||||
block0(v0: i64, v1: i8, v2: i8, v3: i64):
|
||||
v4 = atomic_cas.i8 v3, v1, v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: stmg %r14, %r15, 112(%r15)
|
||||
; nextln: sllk %r2, %r5, 3
|
||||
; nextln: nill %r5, 65532
|
||||
; nextln: lcr %r14, %r2
|
||||
; nextln: l %r0, 0(%r5)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: lmg %r14, %r15, 112(%r15)
|
||||
; nextln: br %r14
|
||||
|
||||
|
||||
105  cranelift/filetests/filetests/isa/s390x/atomic_rmw-arch13.clif  Normal file
@@ -0,0 +1,105 @@
|
||||
test compile
|
||||
target s390x arch13
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: nngrk %r1, %r0, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: nnrk %r1, %r0, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: lg %r0, 0(%r3)
|
||||
; nextln: 0: nngrk %r1, %r0, %r2 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: nnrk %r1, %r0, %r2 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
620  cranelift/filetests/filetests/isa/s390x/atomic_rmw-little.clif  Normal file
@@ -0,0 +1,620 @@
|
||||
test compile
|
||||
target s390x
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (XCHG)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: lg %r0, 0(%r3)
|
||||
; nextln: 0: csg %r0, %r2, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: cs %r0, %r2, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (ADD)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_add_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; agr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_add_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; ar %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_add_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; ar %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_add_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SUB)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_sub_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; sgr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_sub_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; sr %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; sr %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (AND)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_and_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: lang %r2, %r2, 0(%r3)
|
||||
; nextln: lrvgr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_and_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: lan %r2, %r2, 0(%r3)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_and_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_and_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (OR)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_or_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: laog %r2, %r2, 0(%r3)
|
||||
; nextln: lrvgr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_or_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: lao %r2, %r2, 0(%r3)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_or_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rosbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_or_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (XOR)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_xor_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: laxg %r2, %r2, 0(%r3)
|
||||
; nextln: lrvgr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xor_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: lax %r2, %r2, 0(%r3)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (NAND)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvgr %r2, %r4
|
||||
; nextln: lg %r0, 0(%r3)
|
||||
; nextln: 0: ngrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lrvr %r2, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: nrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lrvr %r4, %r4
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SMIN)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SMAX)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (UMIN)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (UMAX)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 little umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 little umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lrvr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 little umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 0(%r2)
|
||||
; nextln: lrvr %r2, %r2
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 little umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
@@ -1,6 +1,59 @@
|
||||
test compile
|
||||
target s390x
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (XCHG)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 xchg v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
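In the i8 variants, `lcr` negates the shift amount so that the second `rll` can rotate the updated word back to its original position: rotate counts are taken modulo the word size, so the negated amount behaves like rotating right by the same distance. A short sketch of why that round-trips (plain Rust, not from the patch):

```rust
// Sketch: rotating by `shift` and then by the negated amount (reduced mod 32,
// as the hardware does) restores the original word, which is what the
// lcr + second rll pair relies on.
fn main() {
    let word = 0x1234_5678u32;
    let shift = 24u32;                        // bit position of the target byte
    let rotated = word.rotate_left(shift);
    let back = (32 - shift) % 32;             // effective value of the negated count
    assert_eq!(rotated.rotate_left(back), word);
}
```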
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (ADD)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -23,6 +76,35 @@ block0(v0: i64, v1: i32):
|
||||
; check: laa %r2, %r3, 0(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_add_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
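For add and sub on subwords, the operand is pre-shifted into the upper halfword (`sllk %r4, %r4, 16`) and the target halfword is rotated there as well, so an ordinary 32-bit `ar`/`sr` does the work: any carry out of bit 31 simply falls off, and the neighbouring data in the low half is never disturbed. A quick sketch of that trick in plain Rust (not from the patch):

```rust
// Sketch: 16-bit add carried out in the top half of a 32-bit word; overflow
// out of the top bit is discarded, so the other halfword stays intact.
fn add_u16_in_high_half(word: u32, addend: u16) -> u32 {
    word.wrapping_add((addend as u32) << 16)
}

fn main() {
    let word = 0xFFFF_1234u32;                 // target halfword 0xFFFF, neighbour 0x1234
    assert_eq!(add_u16_in_high_half(word, 1), 0x0000_1234);
}
```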
|
||||
|
||||
function %atomic_rmw_add_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 add v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SUB)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -47,6 +129,35 @@ block0(v0: i64, v1: i32):
|
||||
; nextln: laa %r2, %r3, 0(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 sub v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (AND)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -69,6 +180,33 @@ block0(v0: i64, v1: i32):
|
||||
; check: lan %r2, %r3, 0(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_and_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_and_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 and v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (OR)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -91,6 +229,33 @@ block0(v0: i64, v1: i32):
|
||||
; check: lao %r2, %r3, 0(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_or_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_or_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 or v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (XOR)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
@@ -112,3 +277,304 @@ block0(v0: i64, v1: i32):
|
||||
|
||||
; check: lax %r2, %r3, 0(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 xor v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (NAND)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: ngrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
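Without the arch13 `nngrk` instruction, the nand loop body computes the AND and then complements the result with two 32-bit XORs against all-ones (`xilf` and `xihf`), which together amount to a full 64-bit NOT. A small check of that identity, written as a standalone Rust sketch:

```rust
// Sketch: nand as and-then-complement, with the complement done as the two
// 32-bit xors (xilf for the low half, xihf for the high half) seen above.
fn nand_via_and_xor(a: u64, b: u64) -> u64 {
    let and = a & b;
    (and ^ 0x0000_0000_FFFF_FFFF) ^ 0xFFFF_FFFF_0000_0000
}

fn main() {
    for &(a, b) in &[(0u64, 0), (1, 1), (0xC0FF_EEEE_DECA_FFFF, 0x7DCB_5691_7DCB_5691)] {
        assert_eq!(nand_via_and_xor(a, b), !(a & b));
    }
}
```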
|
||||
|
||||
function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: nrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 nand v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SMIN)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: cgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: cr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 smin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (SMAX)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: cgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: cr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 smax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (UMIN)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: clgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: clr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 umin v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ATOMIC_RMW (UMAX)
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = atomic_rmw.i64 umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: lg %r0, 0(%r3)
|
||||
; nextln: 0: clgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lgr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i32):
|
||||
v3 = atomic_rmw.i32 umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: l %r0, 0(%r3)
|
||||
; nextln: 0: clr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: lr %r2, %r0
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 {
|
||||
block0(v0: i64, v1: i64, v2: i16):
|
||||
v3 = atomic_rmw.i16 umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 16
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 16(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 {
|
||||
block0(v0: i64, v1: i64, v2: i8):
|
||||
v3 = atomic_rmw.i8 umax v1, v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sllk %r2, %r3, 3
|
||||
; nextln: nill %r3, 65532
|
||||
; nextln: sllk %r4, %r4, 24
|
||||
; nextln: lcr %r5, %r2
|
||||
; nextln: l %r0, 0(%r3)
|
||||
; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
|
||||
; nextln: rll %r2, %r0, 8(%r2)
|
||||
; nextln: br %r14
|
||||
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are right regarding atomicity, but we can
|
||||
; test if they perform their operation correctly
|
||||
|
||||
function %atomic_cas_i64(i64, i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v3
|
||||
|
||||
v4 = atomic_cas.i64 little v3, v1, v2
|
||||
|
||||
v5 = load.i64 little v3
|
||||
return v5
|
||||
}
|
||||
; run: %atomic_cas_i64(0, 0, 2) == 2
|
||||
; run: %atomic_cas_i64(1, 0, 2) == 1
|
||||
; run: %atomic_cas_i64(0, 1, 2) == 0
|
||||
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
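These run lines assert the memory-effect side of `atomic_cas`: the store happens only when the expected value matches, and the slot is left untouched otherwise. For reference, the same three cases in plain Rust (a sketch, not part of the test suite):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Mirror of the run lines: store v0, attempt a CAS expecting v1 with
// replacement v2, and report what ends up in memory.
fn cas_result(v0: u64, v1: u64, v2: u64) -> u64 {
    let slot = AtomicU64::new(v0);
    let _ = slot.compare_exchange(v1, v2, Ordering::SeqCst, Ordering::SeqCst);
    slot.load(Ordering::SeqCst)
}

fn main() {
    assert_eq!(cas_result(0, 0, 2), 2); // expected matches: replacement stored
    assert_eq!(cas_result(1, 0, 2), 1); // expected differs: memory unchanged
    assert_eq!(cas_result(0, 1, 2), 0);
}
```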
|
||||
|
||||
function %atomic_cas_i32(i32, i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32, v2: i32):
|
||||
v3 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = atomic_cas.i32 little v3, v1, v2
|
||||
|
||||
v5 = load.i32 little v3
|
||||
return v5
|
||||
}
|
||||
; run: %atomic_cas_i32(0, 0, 2) == 2
|
||||
; run: %atomic_cas_i32(1, 0, 2) == 1
|
||||
; run: %atomic_cas_i32(0, 1, 2) == 0
|
||||
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are right regarding atomicity, but we can
|
||||
; test if they perform their operation correctly
|
||||
|
||||
function %atomic_cas_big_i16(i32, i64, i16, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16, v3: i16):
|
||||
v4 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v4
|
||||
|
||||
v5 = iadd.i64 v4, v1
|
||||
v6 = atomic_cas.i16 big v5, v2, v3
|
||||
|
||||
v7 = load.i32 big v4
|
||||
return v7
|
||||
}
|
||||
; run: %atomic_cas_big_i16(0x12345678, 0, 0x1234, 0xabcd) == 0xabcd5678
|
||||
; run: %atomic_cas_big_i16(0x12345678, 0, 0x4321, 0xabcd) == 0x12345678
|
||||
; run: %atomic_cas_big_i16(0x12345678, 2, 0x5678, 0xabcd) == 0x1234abcd
|
||||
; run: %atomic_cas_big_i16(0x12345678, 2, 0x8765, 0xabcd) == 0x12345678
|
||||
|
||||
function %atomic_cas_little_i16(i32, i64, i16, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16, v3: i16):
|
||||
v4 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v4
|
||||
|
||||
v5 = iadd.i64 v4, v1
|
||||
v6 = atomic_cas.i16 little v5, v2, v3
|
||||
|
||||
v7 = load.i32 little v4
|
||||
return v7
|
||||
}
|
||||
; run: %atomic_cas_little_i16(0x12345678, 2, 0x1234, 0xabcd) == 0xabcd5678
|
||||
; run: %atomic_cas_little_i16(0x12345678, 2, 0x4321, 0xabcd) == 0x12345678
|
||||
; run: %atomic_cas_little_i16(0x12345678, 0, 0x5678, 0xabcd) == 0x1234abcd
|
||||
; run: %atomic_cas_little_i16(0x12345678, 0, 0x8765, 0xabcd) == 0x12345678
|
||||
|
||||
function %atomic_cas_big_i8(i32, i64, i8, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8, v3: i8):
|
||||
v4 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v4
|
||||
|
||||
v5 = iadd.i64 v4, v1
|
||||
v6 = atomic_cas.i8 big v5, v2, v3
|
||||
|
||||
v7 = load.i32 big v4
|
||||
return v7
|
||||
}
|
||||
; run: %atomic_cas_big_i8(0x12345678, 0, 0x12, 0xab) == 0xab345678
|
||||
; run: %atomic_cas_big_i8(0x12345678, 0, 0x21, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_big_i8(0x12345678, 1, 0x34, 0xab) == 0x12ab5678
|
||||
; run: %atomic_cas_big_i8(0x12345678, 1, 0x43, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_big_i8(0x12345678, 2, 0x56, 0xab) == 0x1234ab78
|
||||
; run: %atomic_cas_big_i8(0x12345678, 2, 0x65, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_big_i8(0x12345678, 3, 0x78, 0xab) == 0x123456ab
|
||||
; run: %atomic_cas_big_i8(0x12345678, 3, 0x87, 0xab) == 0x12345678
|
||||
|
||||
function %atomic_cas_little_i8(i32, i64, i8, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8, v3: i8):
|
||||
v4 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v4
|
||||
|
||||
v5 = iadd.i64 v4, v1
|
||||
v6 = atomic_cas.i8 little v5, v2, v3
|
||||
|
||||
v7 = load.i32 little v4
|
||||
return v7
|
||||
}
|
||||
; run: %atomic_cas_little_i8(0x12345678, 3, 0x12, 0xab) == 0xab345678
|
||||
; run: %atomic_cas_little_i8(0x12345678, 3, 0x21, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_little_i8(0x12345678, 2, 0x34, 0xab) == 0x12ab5678
|
||||
; run: %atomic_cas_little_i8(0x12345678, 2, 0x43, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_little_i8(0x12345678, 1, 0x56, 0xab) == 0x1234ab78
|
||||
; run: %atomic_cas_little_i8(0x12345678, 1, 0x65, 0xab) == 0x12345678
|
||||
; run: %atomic_cas_little_i8(0x12345678, 0, 0x78, 0xab) == 0x123456ab
|
||||
; run: %atomic_cas_little_i8(0x12345678, 0, 0x87, 0xab) == 0x12345678
|
||||
|
||||
43  cranelift/filetests/filetests/runtests/atomic-cas.clif  Normal file
@@ -0,0 +1,43 @@
|
||||
test run
|
||||
target aarch64
|
||||
target aarch64 has_lse
|
||||
target x86_64
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are right regarding atomicity, but we can
|
||||
; test if they perform their operation correctly
|
||||
|
||||
function %atomic_cas_i64(i64, i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v3 = stack_addr.i64 ss0
|
||||
v4 = atomic_cas.i64 v3, v1, v2
|
||||
|
||||
v5 = stack_load.i64 ss0
|
||||
return v5
|
||||
}
|
||||
; run: %atomic_cas_i64(0, 0, 2) == 2
|
||||
; run: %atomic_cas_i64(1, 0, 2) == 1
|
||||
; run: %atomic_cas_i64(0, 1, 2) == 0
|
||||
; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_cas_i32(i32, i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32, v2: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v3 = stack_addr.i32 ss0
|
||||
v4 = atomic_cas.i32 v3, v1, v2
|
||||
|
||||
v5 = stack_load.i32 ss0
|
||||
return v5
|
||||
}
|
||||
; run: %atomic_cas_i32(0, 0, 2) == 2
|
||||
; run: %atomic_cas_i32(1, 0, 2) == 1
|
||||
; run: %atomic_cas_i32(0, 1, 2) == 0
|
||||
; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
|
||||
@@ -1,238 +0,0 @@
|
||||
test run
|
||||
target aarch64
|
||||
target aarch64 has_lse
|
||||
target x86_64
|
||||
; TODO: Merge this with atomic-rmw.clif when s390x supports it
|
||||
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 nand v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i64(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
|
||||
|
||||
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 nand v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i32(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_smin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i32(-1, -1) == -1
|
||||
; run: %atomic_rmw_smin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
429
cranelift/filetests/filetests/runtests/atomic-rmw-little.clif
Normal file
@@ -0,0 +1,429 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are atomic, but we can test that
; they perform their operation correctly
|
||||
|
||||
function %atomic_rmw_add_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little add v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_add_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_add_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_add_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_add_i64(1, 1) == 2
|
||||
; run: %atomic_rmw_add_i64(0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111) == 0xDECAFFFF_DECAFFFF
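; Note: since the store, the atomic_rmw and the load above all use the
; "little" flag, endianness cancels out and the expected values match the
; default (big-endian) tests; what these tests primarily exercise is the
; little-endian lowering on a big-endian target such as s390x.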
|
||||
|
||||
function %atomic_rmw_add_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little add v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_add_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_add_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_add_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_add_i32(1, 1) == 2
|
||||
; run: %atomic_rmw_add_i32(0xC0FFEEEE, 0x1DCB1111) == 0xDECAFFFF
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_sub_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little sub v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_sub_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_sub_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_sub_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_sub_i64(1, 1) == 0
|
||||
; run: %atomic_rmw_sub_i64(0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111) == 0xC0FFEEEE_C0FFEEEE
|
||||
|
||||
function %atomic_rmw_sub_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little sub v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_sub_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_sub_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_sub_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_sub_i32(1, 1) == 0
|
||||
; run: %atomic_rmw_sub_i32(0xDECAFFFF, 0x1DCB1111) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_and_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little and v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_and_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_and_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_and_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_and_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_and_i64(0xF1FFFEFE_FEEEFFFF, 0xCEFFEFEF_DFDBFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_and_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little and v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; run: %atomic_rmw_and_i32(0, 0) == 0
; run: %atomic_rmw_and_i32(1, 0) == 0
; run: %atomic_rmw_and_i32(0, 1) == 0
; run: %atomic_rmw_and_i32(1, 1) == 1
; run: %atomic_rmw_and_i32(0xF1FFFEFE, 0xCEFFEFEF) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_or_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little or v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_or_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_or_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_or_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_or_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_or_i64(0x80AAAAAA_8A8AAAAA, 0x40554444_54405555) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_or_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little or v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
|
||||
; run: %atomic_rmw_or_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_or_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_or_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_or_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_or_i32(0x80AAAAAA, 0x40554444) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xor_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little xor v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xor_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xor_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_xor_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xor_i64(1, 1) == 0
|
||||
; run: %atomic_rmw_xor_i64(0x8FA50A64_9440A07D, 0x4F5AE48A_4A8A5F82) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xor_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little xor v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xor_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xor_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_xor_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xor_i32(1, 1) == 0
|
||||
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little nand v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i64(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
|
||||
|
||||
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little nand v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i32(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little umin v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little umin v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little umax v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little umax v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little smin v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_smin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little smin v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i32(-1, -1) == -1
|
||||
; run: %atomic_rmw_smin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little smax v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little smax v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = stack_addr.i64 ss0
|
||||
store.i64 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i64 little xchg v2, v1
|
||||
|
||||
v4 = load.i64 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = stack_addr.i32 ss0
|
||||
store.i32 little v0, v2
|
||||
|
||||
v3 = atomic_rmw.i32 little xchg v2, v1
|
||||
|
||||
v4 = load.i32 little v2
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE
|
||||
907
cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif
Normal file
@@ -0,0 +1,907 @@
|
||||
test run
|
||||
target s390x
|
||||
|
||||
; We can't test that these instructions are atomic, but we can test that
; they perform their operation correctly
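; The second argument of each function below is a byte offset that is added
; to the stack-slot address, so every run line targets one specific i16 or i8
; lane of the stored 32-bit word. With the "big" flag, offset 0 addresses the
; most significant bytes of the i32; with the "little" flag, offset 0
; addresses the least significant bytes.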
|
||||
|
||||
function %atomic_rmw_add_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big add v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0x1111) == 0x23455678
; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0xffff) == 0x12335678
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0x1111) == 0x12346789
; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0xffff) == 0x12345677
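; For example, in the first run line above the i32 0x12345678 is stored
; big-endian, so the i16 at byte offset 0 holds 0x1234; adding 0x1111 gives
; 0x2345, and the word read back is 0x23455678.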
|
||||
|
||||
function %atomic_rmw_add_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little add v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0x1111) == 0x23455678
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0xffff) == 0x12335678
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0x1111) == 0x12346789
|
||||
; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0xffff) == 0x12345677
|
||||
|
||||
function %atomic_rmw_add_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big add v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0x11) == 0x23345678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0xff) == 0x11345678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0x11) == 0x12455678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0xff) == 0x12335678
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0x11) == 0x12346778
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0xff) == 0x12345578
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0x11) == 0x12345689
|
||||
; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0xff) == 0x12345677
|
||||
|
||||
function %atomic_rmw_add_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little add v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0x11) == 0x23345678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0xff) == 0x11345678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0x11) == 0x12455678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0xff) == 0x12335678
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0x11) == 0x12346778
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0xff) == 0x12345578
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0x11) == 0x12345689
|
||||
; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0xff) == 0x12345677
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_sub_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big sub v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0x1111) == 0x01235678
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0xffff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0x1111) == 0x12344567
|
||||
; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0xffff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little sub v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0x1111) == 0x01235678
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0xffff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0x1111) == 0x12344567
|
||||
; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0xffff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big sub v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0x11) == 0x01345678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0xff) == 0x13345678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0x11) == 0x12235678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0xff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0x11) == 0x12344578
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0xff) == 0x12345778
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0x11) == 0x12345667
|
||||
; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0xff) == 0x12345679
|
||||
|
||||
function %atomic_rmw_sub_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little sub v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0x11) == 0x01345678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0xff) == 0x13345678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0x11) == 0x12235678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0xff) == 0x12355678
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0x11) == 0x12344578
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0xff) == 0x12345778
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0x11) == 0x12345667
|
||||
; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0xff) == 0x12345679
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_and_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big and v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0xf000) == 0x10005678
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0x000f) == 0x00045678
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0xf000) == 0x12345000
|
||||
; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0x000f) == 0x12340008
|
||||
|
||||
function %atomic_rmw_and_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little and v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0xf000) == 0x10005678
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0x000f) == 0x00045678
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0xf000) == 0x12345000
|
||||
; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0x000f) == 0x12340008
|
||||
|
||||
function %atomic_rmw_and_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big and v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0xf0) == 0x10345678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0x0f) == 0x02345678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0xf0) == 0x12305678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0x0f) == 0x12045678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0xf0) == 0x12345078
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0x0f) == 0x12340678
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0xf0) == 0x12345670
|
||||
; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0x0f) == 0x12345608
|
||||
|
||||
function %atomic_rmw_and_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little and v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0xf0) == 0x10345678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0x0f) == 0x02345678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0xf0) == 0x12305678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0x0f) == 0x12045678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0xf0) == 0x12345078
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0x0f) == 0x12340678
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0xf0) == 0x12345670
|
||||
; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0x0f) == 0x12345608
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_or_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big or v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0xf000) == 0xf2345678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0x000f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0xf000) == 0x1234f678
|
||||
; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0x000f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little or v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0xf000) == 0xf2345678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0x000f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0xf000) == 0x1234f678
|
||||
; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0x000f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big or v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0xf0) == 0xf2345678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0x0f) == 0x1f345678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0xf0) == 0x12f45678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0x0f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0xf0) == 0x1234f678
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0x0f) == 0x12345f78
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0xf0) == 0x123456f8
|
||||
; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0x0f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_or_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little or v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0xf0) == 0xf2345678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0x0f) == 0x1f345678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0xf0) == 0x12f45678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0x0f) == 0x123f5678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0xf0) == 0x1234f678
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0x0f) == 0x12345f78
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0xf0) == 0x123456f8
|
||||
; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0x0f) == 0x1234567f
|
||||
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xor_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big xor v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0xf000) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0x000f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0xf000) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0x000f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little xor v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0xf000) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0x000f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0xf000) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0x000f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big xor v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0xf0) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0x0f) == 0x1d345678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0xf0) == 0x12c45678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0x0f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0xf0) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0x0f) == 0x12345978
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0xf0) == 0x12345688
|
||||
; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0x0f) == 0x12345677
|
||||
|
||||
function %atomic_rmw_xor_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little xor v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0xf0) == 0xe2345678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0x0f) == 0x1d345678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0xf0) == 0x12c45678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0x0f) == 0x123b5678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0xf0) == 0x1234a678
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0x0f) == 0x12345978
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0xf0) == 0x12345688
|
||||
; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0x0f) == 0x12345677
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_nand_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big nand v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0xf000) == 0xefff5678
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0x000f) == 0xfffb5678
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0xf000) == 0x1234afff
|
||||
; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0x000f) == 0x1234fff7
|
||||
|
||||
function %atomic_rmw_nand_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little nand v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0xf000) == 0xefff5678
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0x000f) == 0xfffb5678
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0xf000) == 0x1234afff
|
||||
; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0x000f) == 0x1234fff7
|
||||
|
||||
function %atomic_rmw_nand_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big nand v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0xf0) == 0xef345678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0x0f) == 0xfd345678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0xf0) == 0x12cf5678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0x0f) == 0x12fb5678
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0xf0) == 0x1234af78
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0x0f) == 0x1234f978
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0xf0) == 0x1234568f
|
||||
; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0x0f) == 0x123456f7
|
||||
|
||||
function %atomic_rmw_nand_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little nand v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0xf0) == 0xef345678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0x0f) == 0xfd345678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0xf0) == 0x12cf5678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0x0f) == 0x12fb5678
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0xf0) == 0x1234af78
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0x0f) == 0x1234f978
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0xf0) == 0x1234568f
|
||||
; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0x0f) == 0x123456f7
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big umin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little umin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big umin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
|
||||
function %atomic_rmw_umin_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little umin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big umax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_umax_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little umax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0x1111) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_umax_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big umax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0xff) == 0x123456ff
|
||||
|
||||
function %atomic_rmw_umax_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little umax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0x11) == 0x12345678
|
||||
; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0xff) == 0x123456ff
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big smin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_smin_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little smin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_smin_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big smin v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0xff) == 0x123456ff
|
||||
|
||||
function %atomic_rmw_smin_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little smin v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0xff) == 0x123456ff
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big smax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0x7fff) == 0x7fff5678
|
||||
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0x7fff) == 0x12347fff
|
||||
|
||||
function %atomic_rmw_smax_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little smax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0x7fff) == 0x7fff5678
|
||||
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0xffff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0x7fff) == 0x12347fff
|
||||
|
||||
function %atomic_rmw_smax_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big smax v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0x7f) == 0x7f345678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0x7f) == 0x127f5678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0x7f) == 0x12347f78
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0x7f) == 0x1234567f
|
||||
|
||||
function %atomic_rmw_smax_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little smax v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0x7f) == 0x7f345678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0x7f) == 0x127f5678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0x7f) == 0x12347f78
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0xff) == 0x12345678
|
||||
; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0x7f) == 0x1234567f
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_big_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 big xchg v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0x1111) == 0x11115678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0xffff) == 0xffff5678
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0x1111) == 0x12341111
; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_xchg_little_i16(i32, i64, i16) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i16):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i16 little xchg v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0x1111) == 0x11115678
|
||||
; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0xffff) == 0xffff5678
|
||||
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0x1111) == 0x12341111
|
||||
; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff
|
||||
|
||||
function %atomic_rmw_xchg_big_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 big v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 big xchg v4, v2
|
||||
|
||||
v6 = load.i32 big v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0xff) == 0x123456ff
|
||||
|
||||
function %atomic_rmw_xchg_little_i8(i32, i64, i8) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i64, v2: i8):
|
||||
v3 = stack_addr.i64 ss0
|
||||
store.i32 little v0, v3
|
||||
|
||||
v4 = iadd.i64 v3, v1
|
||||
v5 = atomic_rmw.i8 little xchg v4, v2
|
||||
|
||||
v6 = load.i32 little v3
|
||||
return v6
|
||||
}
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0x11) == 0x11345678
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0xff) == 0xff345678
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0x11) == 0x12115678
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0xff) == 0x12ff5678
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0x11) == 0x12341178
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0xff) == 0x1234ff78
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0x11) == 0x12345611
|
||||
; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0xff) == 0x123456ff
|
||||
|
||||
@@ -196,3 +196,237 @@ block0(v0: i32, v1: i32):
|
||||
; run: %atomic_rmw_xor_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xor_i32(1, 1) == 0
|
||||
; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_nand_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 nand v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i64(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i64(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i64(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E
|
||||
|
||||
function %atomic_rmw_nand_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 nand v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_nand_i32(0, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 0) == -1
|
||||
; run: %atomic_rmw_nand_i32(0, 1) == -1
|
||||
; run: %atomic_rmw_nand_i32(1, 1) == -2
|
||||
; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_umin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_umin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_umin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 umax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 umax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_umax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_umax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_umax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_umax_i32(-1, 1) == -1
|
||||
; run: %atomic_rmw_umax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smin v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i64(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i64(-1, 1) == -1
|
||||
; run: %atomic_rmw_smin_i64(-1, -3) == -3
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smin v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smin_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_smin_i32(0, 1) == 0
|
||||
; run: %atomic_rmw_smin_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smin_i32(-1, -1) == -1
|
||||
; run: %atomic_rmw_smin_i32(-1, -3) == -3
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 smax v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i64(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i64(-1, -3) == -1
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 smax v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_smax_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_smax_i32(1, 0) == 1
|
||||
; run: %atomic_rmw_smax_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, 1) == 1
|
||||
; run: %atomic_rmw_smax_i32(-1, -3) == -1
|
||||
|
||||
|
||||
|
||||
function %atomic_rmw_xchg_i64(i64, i64) -> i64 {
|
||||
ss0 = explicit_slot 8
|
||||
|
||||
block0(v0: i64, v1: i64):
|
||||
stack_store.i64 v0, ss0
|
||||
|
||||
v2 = stack_addr.i64 ss0
|
||||
v3 = atomic_rmw.i64 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i64 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i64(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i64(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
|
||||
|
||||
function %atomic_rmw_xchg_i32(i32, i32) -> i32 {
|
||||
ss0 = explicit_slot 4
|
||||
|
||||
block0(v0: i32, v1: i32):
|
||||
stack_store.i32 v0, ss0
|
||||
|
||||
v2 = stack_addr.i32 ss0
|
||||
v3 = atomic_rmw.i32 xchg v2, v1
|
||||
|
||||
v4 = stack_load.i32 ss0
|
||||
return v4
|
||||
}
|
||||
; run: %atomic_rmw_xchg_i32(0, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(1, 0) == 0
|
||||
; run: %atomic_rmw_xchg_i32(0, 1) == 1
|
||||
; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE