test compile precise-output
set unwind_info=false
target riscv64

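;; All atomic_rmw ops with a direct equivalent in the RV64 "A" (atomics)
;; extension lower to a single amo* instruction. The .aqrl suffix sets the
;; acquire and release bits, giving sequentially consistent ordering; the
;; .d forms operate on 64-bit values and the .w forms on 32-bit values.
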
function %atomic_rmw_add_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 add v0, v1
    return
}

; block0:
;   amoadd.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_add_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 add v0, v1
    return
}

; block0:
;   amoadd.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_sub_i64(i64, i64) {
block0(v0: i64, v1: i64):
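    ;; RV64A has no amosub, so the backend negates the operand and reuses
    ;; amoadd, as the expected output below shows; the .w variant uses the
    ;; same scheme.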
    v2 = atomic_rmw.i64 sub v0, v1
    return
}

; block0:
;   sub a1,zero,a1
;   amoadd.d.aqrl a2,a1,(a0)
;   ret

function %atomic_rmw_sub_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 sub v0, v1
    return
}

; block0:
;   sub a1,zero,a1
;   amoadd.w.aqrl a2,a1,(a0)
;   ret

function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 and v0, v1
    return
}

; block0:
;   amoand.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_and_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 and v0, v1
    return
}

; block0:
;   amoand.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_nand_i64(i64, i64) {
block0(v0: i64, v1: i64):
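    ;; There is no AMO instruction for nand; the backend emits an
    ;; atomic_rmw pseudo-instruction that expands to an LR/SC retry loop,
    ;; roughly (a sketch of the shape, not the exact emitted sequence):
    ;;   loop:
    ;;     lr.d.aqrl  a0, (a3)       # load-reserved old value
    ;;     and        t0, a0, a2     # old & operand
    ;;     not        t0, t0         # nand = ~(old & operand)
    ;;     sc.d.aqrl  t0, t0, (a3)   # store-conditional; t0 = 0 on success
    ;;     bnez       t0, loop       # retry if the reservation was lost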
    v2 = atomic_rmw.i64 nand v0, v1
    return
}

; block0:
;   mv a3,a0
;   mv a2,a1
;   atomic_rmw.i64 nand a0,a2,(a3)##t0=a1 offset=zero
;   ret

function %atomic_rmw_nand_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 nand v0, v1
    return
}

; block0:
;   mv a3,a0
;   mv a2,a1
;   atomic_rmw.i32 nand a0,a2,(a3)##t0=a1 offset=zero
;   ret

function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 or v0, v1
    return
}

; block0:
;   amoor.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_or_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 or v0, v1
    return
}

; block0:
;   amoor.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 xor v0, v1
    return
}

; block0:
;   amoxor.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_xor_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 xor v0, v1
    return
}

; block0:
;   amoxor.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 smax v0, v1
    return
}

; block0:
;   amomax.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_smax_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 smax v0, v1
    return
}

; block0:
;   amomax.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 umax v0, v1
    return
}

; block0:
;   amomaxu.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_umax_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 umax v0, v1
    return
}

; block0:
;   amomaxu.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 smin v0, v1
    return
}

; block0:
;   amomin.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_smin_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 smin v0, v1
    return
}

; block0:
;   amomin.w.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 umin v0, v1
    return
}

; block0:
;   amominu.d.aqrl a0,a1,(a0)
;   ret

function %atomic_rmw_umin_i32(i64, i32) {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 umin v0, v1
    return
}

; block0:
;   amominu.w.aqrl a0,a1,(a0)
;   ret