AArch64 LSE atomic_rmw support

Rename the existing AtomicRMW to AtomicRMWLoop and, when LSE support is
available, lower atomic_rmw operations directly, without a loop.

Copyright (c) 2021, Arm Limited
This commit is contained in:
Sam Parker
2021-09-10 09:32:56 +01:00
parent d20194fa4c
commit 80d596b055
5 changed files with 605 additions and 22 deletions

View File

@@ -0,0 +1,114 @@
test compile
target aarch64 has_lse
; atomic_rmw add (i64): with LSE, expect a single ldaddal (atomic add)
; rather than a load-exclusive/store-exclusive loop.
function %atomic_rmw_add_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 add v0, v1
return
}
; check: ldaddal x1, x0, [x0]
; atomic_rmw add (i32): same as the i64 case but operating on w-registers.
function %atomic_rmw_add_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 add v0, v1
return
}
; check: ldaddal w1, w0, [x0]
; atomic_rmw and (i64): expect ldclral (atomic bit-clear).
; NOTE(review): ldclr clears the bits set in its source operand, so the
; lowering presumably inverts the operand first; the inversion instruction
; is not checked here — confirm it is emitted before ldclral.
function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 and v0, v1
return
}
; check: ldclral x1, x0, [x0]
; atomic_rmw and (i32): ldclral on w-registers (see the i64 note about
; operand inversion for ldclr).
function %atomic_rmw_and_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 and v0, v1
return
}
; check: ldclral w1, w0, [x0]
; atomic_rmw or (i64): expect a single ldsetal (atomic bit-set).
function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 or v0, v1
return
}
; check: ldsetal x1, x0, [x0]
; atomic_rmw or (i32): ldsetal on w-registers.
function %atomic_rmw_or_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 or v0, v1
return
}
; check: ldsetal w1, w0, [x0]
; atomic_rmw xor (i64): expect a single ldeoral (atomic exclusive-OR).
function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 xor v0, v1
return
}
; check: ldeoral x1, x0, [x0]
; atomic_rmw xor (i32): ldeoral on w-registers.
function %atomic_rmw_xor_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 xor v0, v1
return
}
; check: ldeoral w1, w0, [x0]
; atomic_rmw smax (i64): expect a single ldsmaxal (atomic signed maximum).
function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smax v0, v1
return
}
; check: ldsmaxal x1, x0, [x0]
; atomic_rmw smax (i32): ldsmaxal on w-registers.
function %atomic_rmw_smax_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 smax v0, v1
return
}
; check: ldsmaxal w1, w0, [x0]
; atomic_rmw umax (i64): expect a single ldumaxal (atomic unsigned maximum).
function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umax v0, v1
return
}
; check: ldumaxal x1, x0, [x0]
; atomic_rmw umax (i32): ldumaxal on w-registers.
function %atomic_rmw_umax_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 umax v0, v1
return
}
; check: ldumaxal w1, w0, [x0]
; atomic_rmw smin (i64): expect a single ldsminal (atomic signed minimum).
function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smin v0, v1
return
}
; check: ldsminal x1, x0, [x0]
; atomic_rmw smin (i32): ldsminal on w-registers.
function %atomic_rmw_smin_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 smin v0, v1
return
}
; check: ldsminal w1, w0, [x0]
; atomic_rmw umin (i64): expect a single lduminal (atomic unsigned minimum).
function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umin v0, v1
return
}
; check: lduminal x1, x0, [x0]
; atomic_rmw umin (i32): lduminal on w-registers.
function %atomic_rmw_umin_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 umin v0, v1
return
}
; check: lduminal w1, w0, [x0]