wasmtime/cranelift/filetests/filetests/isa/aarch64/atomic-rmw-lse.clif
Commit cc073593a4 by Trevor Elliott: Fix block label printing in precise-output tests (#5798)
As a follow-up to #5780, disassemble the regions identified by bb_starts, falling back to disassembling the whole buffer. This ensures that instructions like br_table, which introduce a lot of constants, don't throw off Capstone for the remainder of the function.

---------

Co-authored-by: Jamey Sharp <jamey@minilop.net>
2023-02-16 02:35:26 +00:00
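
The per-region strategy described in the commit message can be illustrated with a small sketch using the capstone crate. This is an illustration only, not the Wasmtime implementation: the function name disassemble_regions, its signature, and the choice to fall back to a single whole-buffer region when bb_starts is empty are assumptions made here for clarity.

use capstone::prelude::*;

// A minimal sketch, not the actual Wasmtime/Cranelift code: disassemble each
// region delimited by basic-block start offsets so that constant islands
// emitted by instructions like br_table cannot desynchronize the decoder,
// and fall back to disassembling the whole buffer when no block starts are
// known.
fn disassemble_regions(buf: &[u8], bb_starts: &[usize]) -> Result<String, capstone::Error> {
    let cs = Capstone::new()
        .arm64()
        .mode(arch::arm64::ArchMode::Arm)
        .build()?;

    // Region boundaries: each basic-block start, plus the end of the buffer.
    // If no block starts are available, treat the whole buffer as one region
    // (the fallback described in the commit message).
    let mut bounds: Vec<usize> = if bb_starts.is_empty() {
        vec![0]
    } else {
        bb_starts.to_vec()
    };
    bounds.push(buf.len());

    let mut out = String::new();
    for window in bounds.windows(2) {
        let (start, end) = (window[0], window[1]);
        out.push_str(&format!("; region at offset {:#x}\n", start));
        // Pass the region's starting offset as the address so branch targets
        // and other PC-relative operands print relative to the function start.
        for insn in cs.disasm_all(&buf[start..end], start as u64)?.iter() {
            out.push_str(&format!(
                ";   {} {}\n",
                insn.mnemonic().unwrap_or(""),
                insn.op_str().unwrap_or("")
            ));
        }
    }
    Ok(out)
}

Restarting disassembly at every block boundary is what keeps a constant pool in one block from leaving the decoder misaligned for everything after it, which is exactly the symptom the test expectations below exercise.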

test compile precise-output
target aarch64 has_lse
function %atomic_rmw_add_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 add v0, v1
return
}
; VCode:
; block0:
; ldaddal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldaddal x1, x3, [x0]
; ret
function %atomic_rmw_add_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 add v0, v1
return
}
; VCode:
; block0:
; ldaddal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldaddal w1, w3, [x0]
; ret
function %atomic_rmw_add_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 add v0, v1
return
}
; VCode:
; block0:
; ldaddalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldaddalh w1, w3, [x0]
; ret
function %atomic_rmw_add_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 add v0, v1
return
}
; VCode:
; block0:
; ldaddalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldaddalb w1, w3, [x0]
; ret
function %atomic_rmw_sub_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 sub v0, v1
return
}
; VCode:
; block0:
; sub x3, xzr, x1
; ldaddal x3, x5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; neg x3, x1
; ldaddal x3, x5, [x0]
; ret
function %atomic_rmw_sub_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 sub v0, v1
return
}
; VCode:
; block0:
; sub w3, wzr, w1
; ldaddal w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; neg w3, w1
; ldaddal w3, w5, [x0]
; ret
function %atomic_rmw_sub_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 sub v0, v1
return
}
; VCode:
; block0:
; sub w3, wzr, w1
; ldaddalh w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; neg w3, w1
; ldaddalh w3, w5, [x0]
; ret
function %atomic_rmw_sub_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 sub v0, v1
return
}
; VCode:
; block0:
; sub w3, wzr, w1
; ldaddalb w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; neg w3, w1
; ldaddalb w3, w5, [x0]
; ret
function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 and v0, v1
return
}
; VCode:
; block0:
; eon x3, x1, xzr
; ldclral x3, x5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; eon x3, x1, xzr
; ldclral x3, x5, [x0]
; ret
function %atomic_rmw_and_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 and v0, v1
return
}
; VCode:
; block0:
; eon w3, w1, wzr
; ldclral w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; eon w3, w1, wzr
; ldclral w3, w5, [x0]
; ret
function %atomic_rmw_and_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 and v0, v1
return
}
; VCode:
; block0:
; eon w3, w1, wzr
; ldclralh w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; eon w3, w1, wzr
; ldclralh w3, w5, [x0]
; ret
function %atomic_rmw_and_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 and v0, v1
return
}
; VCode:
; block0:
; eon w3, w1, wzr
; ldclralb w3, w5, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; eon w3, w1, wzr
; ldclralb w3, w5, [x0]
; ret
function %atomic_rmw_nand_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 nand v0, v1
return
}
; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_64 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; str x28, [sp, #-0x10]!
; stp x26, x27, [sp, #-0x10]!
; stp x24, x25, [sp, #-0x10]!
; block1: ; offset 0x14
; mov x25, x0
; mov x26, x1
; ldaxr x27, [x25]
; and x28, x27, x26
; mvn x28, x28
; stlxr w24, x28, [x25]
; cbnz x24, #0x1c
; ldp x24, x25, [sp], #0x10
; ldp x26, x27, [sp], #0x10
; ldr x28, [sp], #0x10
; ldp x29, x30, [sp], #0x10
; ret
function %atomic_rmw_nand_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 nand v0, v1
return
}
; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_32 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; str x28, [sp, #-0x10]!
; stp x26, x27, [sp, #-0x10]!
; stp x24, x25, [sp, #-0x10]!
; block1: ; offset 0x14
; mov x25, x0
; mov x26, x1
; ldaxr w27, [x25]
; and w28, w27, w26
; mvn w28, w28
; stlxr w24, w28, [x25]
; cbnz x24, #0x1c
; ldp x24, x25, [sp], #0x10
; ldp x26, x27, [sp], #0x10
; ldr x28, [sp], #0x10
; ldp x29, x30, [sp], #0x10
; ret
function %atomic_rmw_nand_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 nand v0, v1
return
}
; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_16 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; str x28, [sp, #-0x10]!
; stp x26, x27, [sp, #-0x10]!
; stp x24, x25, [sp, #-0x10]!
; block1: ; offset 0x14
; mov x25, x0
; mov x26, x1
; ldaxrh w27, [x25]
; and w28, w27, w26
; mvn w28, w28
; stlxrh w24, w28, [x25]
; cbnz x24, #0x1c
; ldp x24, x25, [sp], #0x10
; ldp x26, x27, [sp], #0x10
; ldr x28, [sp], #0x10
; ldp x29, x30, [sp], #0x10
; ret
function %atomic_rmw_nand_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 nand v0, v1
return
}
; VCode:
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x26, x1
; atomic_rmw_loop_nand_8 addr=x25 operand=x26 oldval=x27 scratch1=x24 scratch2=x28
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
;
; Disassembled:
; block0: ; offset 0x0
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
; str x28, [sp, #-0x10]!
; stp x26, x27, [sp, #-0x10]!
; stp x24, x25, [sp, #-0x10]!
; block1: ; offset 0x14
; mov x25, x0
; mov x26, x1
; ldaxrb w27, [x25]
; and w28, w27, w26
; mvn w28, w28
; stlxrb w24, w28, [x25]
; cbnz x24, #0x1c
; ldp x24, x25, [sp], #0x10
; ldp x26, x27, [sp], #0x10
; ldr x28, [sp], #0x10
; ldp x29, x30, [sp], #0x10
; ret
function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 or v0, v1
return
}
; VCode:
; block0:
; ldsetal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsetal x1, x3, [x0]
; ret
function %atomic_rmw_or_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 or v0, v1
return
}
; VCode:
; block0:
; ldsetal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsetal w1, w3, [x0]
; ret
function %atomic_rmw_or_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 or v0, v1
return
}
; VCode:
; block0:
; ldsetalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsetalh w1, w3, [x0]
; ret
function %atomic_rmw_or_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 or v0, v1
return
}
; VCode:
; block0:
; ldsetalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsetalb w1, w3, [x0]
; ret
function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 xor v0, v1
return
}
; VCode:
; block0:
; ldeoral x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldeoral x1, x3, [x0]
; ret
function %atomic_rmw_xor_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 xor v0, v1
return
}
; VCode:
; block0:
; ldeoral w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldeoral w1, w3, [x0]
; ret
function %atomic_rmw_xor_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 xor v0, v1
return
}
; VCode:
; block0:
; ldeoralh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldeoralh w1, w3, [x0]
; ret
function %atomic_rmw_xor_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 xor v0, v1
return
}
; VCode:
; block0:
; ldeoralb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldeoralb w1, w3, [x0]
; ret
function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smax v0, v1
return
}
; VCode:
; block0:
; ldsmaxal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsmaxal x1, x3, [x0]
; ret
function %atomic_rmw_smax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smax v0, v1
return
}
; VCode:
; block0:
; ldsmaxal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsmaxal w1, w3, [x0]
; ret
function %atomic_rmw_smax_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 smax v0, v1
return
}
; VCode:
; block0:
; ldsmaxalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsmaxalh w1, w3, [x0]
; ret
function %atomic_rmw_smax_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 smax v0, v1
return
}
; VCode:
; block0:
; ldsmaxalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsmaxalb w1, w3, [x0]
; ret
function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umax v0, v1
return
}
; VCode:
; block0:
; ldumaxal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldumaxal x1, x3, [x0]
; ret
function %atomic_rmw_umax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umax v0, v1
return
}
; VCode:
; block0:
; ldumaxal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldumaxal w1, w3, [x0]
; ret
function %atomic_rmw_umax_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 umax v0, v1
return
}
; VCode:
; block0:
; ldumaxalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldumaxalh w1, w3, [x0]
; ret
function %atomic_rmw_umax_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 umax v0, v1
return
}
; VCode:
; block0:
; ldumaxalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldumaxalb w1, w3, [x0]
; ret
function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smin v0, v1
return
}
; VCode:
; block0:
; ldsminal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsminal x1, x3, [x0]
; ret
function %atomic_rmw_smin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smin v0, v1
return
}
; VCode:
; block0:
; ldsminal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsminal w1, w3, [x0]
; ret
function %atomic_rmw_smin_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 smin v0, v1
return
}
; VCode:
; block0:
; ldsminalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsminalh w1, w3, [x0]
; ret
function %atomic_rmw_smin_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 smin v0, v1
return
}
; VCode:
; block0:
; ldsminalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; ldsminalb w1, w3, [x0]
; ret
function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umin v0, v1
return
}
; VCode:
; block0:
; lduminal x1, x3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; lduminal x1, x3, [x0]
; ret
function %atomic_rmw_umin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umin v0, v1
return
}
; VCode:
; block0:
; lduminal w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; lduminal w1, w3, [x0]
; ret
function %atomic_rmw_umin_i16(i64, i16) {
block0(v0: i64, v1: i16):
v2 = atomic_rmw.i16 umin v0, v1
return
}
; VCode:
; block0:
; lduminalh w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; lduminalh w1, w3, [x0]
; ret
function %atomic_rmw_umin_i8(i64, i8) {
block0(v0: i64, v1: i8):
v2 = atomic_rmw.i8 umin v0, v1
return
}
; VCode:
; block0:
; lduminalb w1, w3, [x0]
; ret
;
; Disassembled:
; block0: ; offset 0x0
; lduminalb w1, w3, [x0]
; ret