wasmtime/cranelift/filetests/filetests/isa/aarch64/atomic-rmw.clif
Commit ae5fe8a728 by Chris Fallin: aarch64: fix up regalloc2 semantics. (#4830)
This PR removes all uses of modify-operands in the aarch64 backend,
replacing them with reused-input operands. This has the nice effect of
removing a bunch of move instructions and more clearly representing
inputs and outputs.
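
For illustration only, here is a minimal sketch of the difference between the
two operand styles, written against regalloc2's public Operand constructors
rather than the backend's actual lowering code; the vreg numbers and the
three-operand add shape are made up for the example:

    // Sketch: describing an "add rd, rn, rm"-style instruction to regalloc2
    // with a reused-input output instead of a modify-operand.
    // Vreg numbering here is hypothetical.
    use regalloc2::{Operand, RegClass, VReg};

    fn main() {
        let rn = VReg::new(0, RegClass::Int); // first input
        let rm = VReg::new(1, RegClass::Int); // second input
        let rd = VReg::new(2, RegClass::Int); // output

        // A modify-operand says "rn is read and written in place", which forces
        // the lowering to copy the value into a fresh vreg first. A reused-input
        // def keeps rd a normal output and only constrains it to land in the
        // same register as operand index 0, so the allocator inserts a move
        // only when it actually has to.
        let operands = [
            Operand::reg_use(rn),          // operand 0: input
            Operand::reg_use(rm),          // operand 1: input
            Operand::reg_reuse_def(rd, 0), // output reuses operand 0's register
        ];
        println!("{:?}", operands);
    }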

This PR also removes the explicit use of pinned vregs in the aarch64
backend, instead using fixed-register constraints on the operands when
instructions or pseudo-instruction sequences require certain registers.
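
A similar sketch, also hypothetical rather than the backend's real code, of
how a fixed-register constraint can replace a pinned vreg. It assumes
regalloc2's reg_fixed_use / reg_fixed_def constructors and assumes that
integer PReg n corresponds to aarch64's xn, which is consistent with the
addr=x25 / operand=x26 / oldval=x27 annotations in the expected output below:

    // Sketch: a pseudo-inst that needs its inputs in x25/x26 and produces its
    // result in x27 (like the atomic_rmw_loop_* pseudo-insts in the test
    // output) states that as operand constraints instead of copying values
    // into pinned vregs.
    use regalloc2::{Operand, PReg, RegClass, VReg};

    fn main() {
        let addr = VReg::new(0, RegClass::Int);
        let operand = VReg::new(1, RegClass::Int);
        let oldval = VReg::new(2, RegClass::Int);

        let operands = [
            Operand::reg_fixed_use(addr, PReg::new(25, RegClass::Int)),    // x25
            Operand::reg_fixed_use(operand, PReg::new(26, RegClass::Int)), // x26
            Operand::reg_fixed_def(oldval, PReg::new(27, RegClass::Int)),  // x27
        ];
        println!("{:?}", operands);
    }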

This is the second PR in the regalloc-semantics cleanup series; after
the remaining backend (s390x) and the ABI code are cleaned up as well,
we'll be able to simplify the regalloc2 frontend.
Committed: 2022-09-01 21:25:20 +00:00


test compile precise-output
target aarch64

function %atomic_rmw_add_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 add v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_add_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_add_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 add v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_add_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_add_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 add v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_add_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_add_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 add v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_add_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_sub_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 sub v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_sub_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_sub_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 sub v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_sub_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_sub_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 sub v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_sub_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_sub_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 sub v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_sub_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 and v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_and_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_and_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 and v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_and_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_and_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 and v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_and_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_and_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 and v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_and_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_nand_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 nand v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_nand_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 nand v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_nand_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 nand v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_nand_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 nand v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 or v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_orr_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_or_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 or v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_orr_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_or_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 or v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_orr_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_or_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 or v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_orr_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 xor v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_eor_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_xor_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 xor v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_eor_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_xor_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 xor v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_eor_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_xor_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 xor v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_eor_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smax_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smax_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smax_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 smax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smax_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smax_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 smax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smax_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umax_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umax_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umax_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 umax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umax_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umax_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 umax v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umax_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smin_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smin_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smin_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 smin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smin_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_smin_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 smin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_smin_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umin_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umin_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umin_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 umin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umin_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret

function %atomic_rmw_umin_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 umin v0, v1
return
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_umin_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret