Modify return pseudo-instructions to carry pairs of registers: virtual and real. This allows us to constrain the virtual registers to the real registers specified by the ABI, instead of directly emitting moves to those real registers.
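As a rough illustration of the idea (a hypothetical Rust sketch only — the names `VReg`, `RealReg`, `RetPair`, and `operand_constraints` are illustrative, not the actual Cranelift or regalloc2 API): the return pseudo-instruction records (virtual, real) register pairs, and the register allocator sees each virtual register as a use pinned to its ABI-assigned real register, so an explicit move before the return is only needed when the value cannot already live there.

// Hypothetical sketch of the vreg/preg-pair scheme described above.

/// A virtual register, as seen before register allocation.
#[derive(Clone, Copy, Debug)]
struct VReg(u32);

/// A physical (real) register fixed by the calling convention.
#[derive(Clone, Copy, Debug)]
struct RealReg(u8);

/// One returned value: the virtual register holding it, paired with
/// the real register the ABI says it must end up in.
#[derive(Clone, Copy, Debug)]
struct RetPair {
    vreg: VReg,
    preg: RealReg,
}

/// The return pseudo-instruction. Instead of lowering emitting a
/// `mov preg, vreg` for every returned value, it records the pairs
/// and lets register allocation satisfy them as fixed-register
/// constraints on the uses.
struct Ret {
    rets: Vec<RetPair>,
}

impl Ret {
    /// Report each virtual register as an operand constrained to its
    /// ABI-assigned real register.
    fn operand_constraints(&self) -> impl Iterator<Item = (VReg, RealReg)> + '_ {
        self.rets.iter().map(|p| (p.vreg, p.preg))
    }
}

fn main() {
    // E.g. an i64 return value on s390x travels in %r2 (GPR 2),
    // which matches the `lgr %r2, ...` moves in the test below.
    let ret = Ret {
        rets: vec![RetPair { vreg: VReg(17), preg: RealReg(2) }],
    };
    for (v, r) in ret.operand_constraints() {
        println!("constrain {v:?} to {r:?} at the return");
    }
}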
test compile precise-output
target s390x

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ATOMIC_RMW (XCHG)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 xchg v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 xchg v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 xchg v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 xchg v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; lcr %r3, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r3) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_add_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 add v0, v1
    return v2
}

; block0:
; laag %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_add_i32(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 add v0, v1
    return v2
}

; block0:
; laa %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_add_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 add v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r3 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_add_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 add v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r3 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_sub_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 sub v0, v1
    return v2
}

; block0:
; lcgr %r5, %r3
; laag %r2, %r5, 0(%r2)
; br %r14

function %atomic_rmw_sub_i32(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 sub v0, v1
    return v2
}

; block0:
; lcr %r5, %r3
; laa %r2, %r5, 0(%r2)
; br %r14

function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 sub v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r3 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 sub v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r3 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_and_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 and v0, v1
    return v2
}

; block0:
; lang %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_and_i32(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 and v0, v1
    return v2
}

; block0:
; lan %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_and_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 and v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_and_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 and v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; lcr %r3, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r3) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_or_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 or v0, v1
    return v2
}

; block0:
; laog %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_or_i32(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 or v0, v1
    return v2
}

; block0:
; lao %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_or_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 or v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_or_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 or v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; lcr %r3, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r3) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_xor_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = atomic_rmw.i64 xor v0, v1
    return v2
}

; block0:
; laxg %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_xor_i32(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
    v2 = atomic_rmw.i32 xor v0, v1
    return v2
}

; block0:
; lax %r2, %r3, 0(%r2)
; br %r14

function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 xor v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 xor v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; lcr %r3, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r3) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 nand v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: ngrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 nand v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: nrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 nand v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 nand v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; lcr %r3, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r3) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 smin v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: cgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 smin v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: cr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 smin v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; cr %r3, %r1 ; jgnl 1f ; risbgn %r1, %r3, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 smin v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; cr %r3, %r1 ; jgnl 1f ; risbgn %r1, %r3, 32, 40, 0 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 smax v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: cgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 smax v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: cr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 smax v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; cr %r3, %r1 ; jgnh 1f ; risbgn %r1, %r3, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 smax v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; cr %r3, %r1 ; jgnh 1f ; risbgn %r1, %r3, 32, 40, 0 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 umin v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: clgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 umin v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: clr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 umin v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; clr %r3, %r1 ; jgnl 1f ; risbgn %r1, %r3, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 umin v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; clr %r3, %r1 ; jgnl 1f ; risbgn %r1, %r3, 32, 40, 0 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14

function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = atomic_rmw.i64 umax v1, v2
    return v3
}

; block0:
; lg %r0, 0(%r3)
; 0: clgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 {
block0(v0: i64, v1: i64, v2: i32):
    v3 = atomic_rmw.i32 umax v1, v2
    return v3
}

; block0:
; l %r0, 0(%r3)
; 0: clr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1:
; lgr %r2, %r0
; br %r14

function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 {
block0(v0: i64, v1: i64, v2: i16):
    v3 = atomic_rmw.i16 umax v1, v2
    return v3
}

; block0:
; lgr %r5, %r4
; sllk %r2, %r3, 3
; lgr %r4, %r3
; nill %r4, 65532
; sllk %r3, %r5, 16
; l %r0, 0(%r4)
; 0: rll %r1, %r0, 0(%r2) ; clr %r3, %r1 ; jgnh 1f ; risbgn %r1, %r3, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r4) ; jglh 0b ; 1:
; rll %r2, %r0, 16(%r2)
; br %r14

function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 {
block0(v0: i64, v1: i64, v2: i8):
    v3 = atomic_rmw.i8 umax v1, v2
    return v3
}

; block0:
; sllk %r2, %r3, 3
; lgr %r5, %r3
; nill %r5, 65532
; sllk %r3, %r4, 24
; lcr %r4, %r2
; l %r0, 0(%r5)
; 0: rll %r1, %r0, 0(%r2) ; clr %r3, %r1 ; jgnh 1f ; risbgn %r1, %r3, 32, 40, 0 ; rll %r1, %r1, 0(%r4) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1:
; rll %r2, %r0, 8(%r2)
; br %r14