test compile precise-output
target s390x

function %rotr_i128_vr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
    v2 = rotr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vl %v3, 0(%r4)
; vrepb %v6, %v3, 15
; vlcb %v16, %v6
; vslb %v18, %v1, %v16
; vsl %v20, %v18, %v16
; vsrlb %v22, %v1, %v6
; vsrl %v24, %v22, %v6
; vo %v26, %v20, %v24
; vst %v26, 0(%r2)
; br %r14
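
;; There is no 128-bit rotate instruction, so the rotate above is assembled
;; from full-width shifts, roughly rotr(x, n) = (x >> n) | (x << -n) with
;; counts taken mod 128: vrepb splats the low count byte across the vector,
;; vlcb negates it, each 128-bit shift is a byte shift plus a bit shift
;; (vsrlb/vsrl and vslb/vsl), and vo merges the two halves.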

function %rotr_i128_reg(i128, i64) -> i128 {
block0(v0: i128, v1: i64):
    v2 = rotr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vlvgb %v5, %r4, 0
; vrepb %v7, %v5, 0
; vlcb %v17, %v7
; vslb %v19, %v1, %v17
; vsl %v21, %v19, %v17
; vsrlb %v23, %v1, %v7
; vsrl %v25, %v23, %v7
; vo %v27, %v21, %v25
; vst %v27, 0(%r2)
; br %r14

function %rotr_i128_imm(i128) -> i128 {
block0(v0: i128):
    v1 = iconst.i32 17
    v2 = rotr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vrepib %v4, 17
; vlcb %v6, %v4
; vslb %v16, %v1, %v6
; vsl %v18, %v16, %v6
; vsrlb %v20, %v1, %v4
; vsrl %v22, %v20, %v4
; vo %v24, %v18, %v22
; vst %v24, 0(%r2)
; br %r14

function %rotr_i64_vr(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2 = rotr.i64 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; rllg %r2, %r2, 0(%r4)
; br %r14

function %rotr_i64_reg(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = rotr.i64 v0, v1
    return v2
}

; block0:
; lcr %r5, %r3
; rllg %r2, %r2, 0(%r5)
; br %r14

function %rotr_i64_imm(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i32 17
    v2 = rotr.i64 v0, v1
    return v2
}

; block0:
; rllg %r2, %r2, 47
; br %r14
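
;; z/Architecture only provides left rotates (rllg/rll), so rotr is lowered
;; as rotl by the complemented count: the register forms negate the amount
;; with lcr, and the immediate form folds the complement into the encoding,
;; e.g. rotr by 17 above becomes rllg by 64 - 17 = 47.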

function %rotr_i32_vr(i32, i128) -> i32 {
block0(v0: i32, v1: i128):
    v2 = rotr.i32 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; rll %r2, %r2, 0(%r4)
; br %r14

function %rotr_i32_reg(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = rotr.i32 v0, v1
    return v2
}

; block0:
; lcr %r5, %r3
; rll %r2, %r2, 0(%r5)
; br %r14

function %rotr_i32_imm(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 17
    v2 = rotr.i32 v0, v1
    return v2
}

; block0:
; rll %r2, %r2, 15
; br %r14

function %rotr_i16_vr(i16, i128) -> i16 {
block0(v0: i16, v1: i128):
    v2 = rotr.i16 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llhr %r2, %r2
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; nill %r3, 15
; nill %r4, 15
; sllk %r4, %r2, 0(%r4)
; srlk %r2, %r2, 0(%r3)
; ork %r2, %r4, %r2
; br %r14
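
;; i16 and i8 (below) have neither a rotate nor a sub-32-bit shift, so the
;; value is zero-extended first (llhr/llcr), the count and its negation
;; (lcr) are masked to the type width (nill 15 / nill 7), and the rotate is
;; built from a 32-bit shift pair plus an or, again as (x >> n) | (x << -n).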

function %rotr_i16_reg(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
    v2 = rotr.i16 v0, v1
    return v2
}

; block0:
; llhr %r5, %r2
; lcr %r2, %r3
; nill %r3, 15
; nill %r2, 15
; sllk %r2, %r5, 0(%r2)
; srlk %r3, %r5, 0(%r3)
; or %r2, %r3
; br %r14

function %rotr_i16_imm(i16) -> i16 {
block0(v0: i16):
    v1 = iconst.i32 10
    v2 = rotr.i16 v0, v1
    return v2
}

; block0:
; llhr %r4, %r2
; sllk %r2, %r4, 6
; srlk %r4, %r4, 10
; or %r2, %r4
; br %r14

function %rotr_i8_vr(i8, i128) -> i8 {
block0(v0: i8, v1: i128):
    v2 = rotr.i8 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llcr %r2, %r2
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; nill %r3, 7
; nill %r4, 7
; sllk %r4, %r2, 0(%r4)
; srlk %r2, %r2, 0(%r3)
; ork %r2, %r4, %r2
; br %r14

function %rotr_i8_reg(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
    v2 = rotr.i8 v0, v1
    return v2
}

; block0:
; llcr %r5, %r2
; lcr %r2, %r3
; nill %r3, 7
; nill %r2, 7
; sllk %r2, %r5, 0(%r2)
; srlk %r3, %r5, 0(%r3)
; or %r2, %r3
; br %r14

function %rotr_i8_imm(i8) -> i8 {
block0(v0: i8):
    v1 = iconst.i32 3
    v2 = rotr.i8 v0, v1
    return v2
}

; block0:
; llcr %r4, %r2
; sllk %r2, %r4, 5
; srlk %r4, %r4, 3
; or %r2, %r4
; br %r14

function %rotl_i128_vr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
    v2 = rotl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vl %v3, 0(%r4)
; vrepb %v6, %v3, 15
; vlcb %v16, %v6
; vslb %v18, %v1, %v6
; vsl %v20, %v18, %v6
; vsrlb %v22, %v1, %v16
; vsrl %v24, %v22, %v16
; vo %v26, %v20, %v24
; vst %v26, 0(%r2)
; br %r14
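
;; rotl mirrors the rotr lowering above with the shift roles swapped: the
;; left shift now takes the original count and the right shift the negated
;; one, i.e. rotl(x, n) = (x << n) | (x >> -n), counts mod 128.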

function %rotl_i128_reg(i128, i64) -> i128 {
block0(v0: i128, v1: i64):
    v2 = rotl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vlvgb %v5, %r4, 0
; vrepb %v7, %v5, 0
; vlcb %v17, %v7
; vslb %v19, %v1, %v7
; vsl %v21, %v19, %v7
; vsrlb %v23, %v1, %v17
; vsrl %v25, %v23, %v17
; vo %v27, %v21, %v25
; vst %v27, 0(%r2)
; br %r14

function %rotl_i128_imm(i128) -> i128 {
block0(v0: i128):
    v1 = iconst.i32 17
    v2 = rotl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vrepib %v4, 17
; vlcb %v6, %v4
; vslb %v16, %v1, %v4
; vsl %v18, %v16, %v4
; vsrlb %v20, %v1, %v6
; vsrl %v22, %v20, %v6
; vo %v24, %v18, %v22
; vst %v24, 0(%r2)
; br %r14

function %rotl_i64_vr(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2 = rotl.i64 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; rllg %r2, %r2, 0(%r3)
; br %r14

function %rotl_i64_reg(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = rotl.i64 v0, v1
    return v2
}

; block0:
; rllg %r2, %r2, 0(%r3)
; br %r14

function %rotl_i64_imm(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i32 17
    v2 = rotl.i64 v0, v1
    return v2
}

; block0:
; rllg %r2, %r2, 17
; br %r14

function %rotl_i32_vr(i32, i128) -> i32 {
block0(v0: i32, v1: i128):
    v2 = rotl.i32 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; rll %r2, %r2, 0(%r3)
; br %r14

function %rotl_i32_reg(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = rotl.i32 v0, v1
    return v2
}

; block0:
; rll %r2, %r2, 0(%r3)
; br %r14

function %rotl_i32_imm(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 17
    v2 = rotl.i32 v0, v1
    return v2
}

; block0:
; rll %r2, %r2, 17
; br %r14

function %rotl_i16_vr(i16, i128) -> i16 {
block0(v0: i16, v1: i128):
    v2 = rotl.i16 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llhr %r2, %r2
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; nill %r3, 15
; nill %r4, 15
; sllk %r5, %r2, 0(%r3)
; srlk %r2, %r2, 0(%r4)
; ork %r2, %r5, %r2
; br %r14

function %rotl_i16_reg(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
    v2 = rotl.i16 v0, v1
    return v2
}

; block0:
; llhr %r5, %r2
; lcr %r2, %r3
; nill %r3, 15
; nill %r2, 15
; sllk %r3, %r5, 0(%r3)
; srlk %r4, %r5, 0(%r2)
; ork %r2, %r3, %r4
; br %r14

function %rotl_i16_imm(i16) -> i16 {
block0(v0: i16):
    v1 = iconst.i32 10
    v2 = rotl.i16 v0, v1
    return v2
}

; block0:
; llhr %r4, %r2
; sllk %r2, %r4, 10
; srlk %r4, %r4, 6
; or %r2, %r4
; br %r14

function %rotl_i8_vr(i8, i128) -> i8 {
block0(v0: i8, v1: i128):
    v2 = rotl.i8 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llcr %r2, %r2
; vlgvg %r3, %v2, 1
; lcr %r4, %r3
; nill %r3, 7
; nill %r4, 7
; sllk %r5, %r2, 0(%r3)
; srlk %r2, %r2, 0(%r4)
; ork %r2, %r5, %r2
; br %r14

function %rotl_i8_reg(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
    v2 = rotl.i8 v0, v1
    return v2
}

; block0:
; llcr %r5, %r2
; lcr %r2, %r3
; nill %r3, 7
; nill %r2, 7
; sllk %r3, %r5, 0(%r3)
; srlk %r4, %r5, 0(%r2)
; ork %r2, %r3, %r4
; br %r14

function %rotl_i8_imm(i8) -> i8 {
block0(v0: i8):
    v1 = iconst.i32 3
    v2 = rotl.i8 v0, v1
    return v2
}

; block0:
; llcr %r4, %r2
; sllk %r2, %r4, 3
; srlk %r4, %r4, 5
; or %r2, %r4
; br %r14

function %ushr_i128_vr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
    v2 = ushr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vl %v3, 0(%r4)
; vrepb %v6, %v3, 15
; vsrlb %v16, %v1, %v6
; vsrl %v18, %v16, %v6
; vst %v18, 0(%r2)
; br %r14
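
;; A full 128-bit shift takes two instructions: the byte-granular shift
;; (vsrlb here; vslb and vsrab for ishl/sshr below) moves whole bytes and
;; the bit-granular shift (vsrl/vsl/vsra) moves the remaining 0-7 bits, so
;; together they honor the low seven bits of the splatted count, i.e. the
;; amount mod 128.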

function %ushr_i128_reg(i128, i64) -> i128 {
block0(v0: i128, v1: i64):
    v2 = ushr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vlvgb %v5, %r4, 0
; vrepb %v7, %v5, 0
; vsrlb %v17, %v1, %v7
; vsrl %v19, %v17, %v7
; vst %v19, 0(%r2)
; br %r14

function %ushr_i128_imm(i128) -> i128 {
block0(v0: i128):
    v1 = iconst.i32 17
    v2 = ushr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vrepib %v4, 17
; vsrlb %v6, %v1, %v4
; vsrl %v16, %v6, %v4
; vst %v16, 0(%r2)
; br %r14

function %ushr_i64_vr(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2 = ushr.i64 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; srlg %r2, %r2, 0(%r3)
; br %r14

function %ushr_i64_reg(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = ushr.i64 v0, v1
    return v2
}

; block0:
; srlg %r2, %r2, 0(%r3)
; br %r14

function %ushr_i64_imm(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i32 17
    v2 = ushr.i64 v0, v1
    return v2
}

; block0:
; srlg %r2, %r2, 17
; br %r14

function %ushr_i32_vr(i32, i128) -> i32 {
block0(v0: i32, v1: i128):
    v2 = ushr.i32 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; nill %r3, 31
; srlk %r2, %r2, 0(%r3)
; br %r14
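
;; CLIF takes shift amounts modulo the type width. The 64-bit machine shifts
;; (sllg/srlg/srag) use six count bits and so are mod 64 already, but the
;; 32-bit shifts honor counts up to 63, so for i32 and narrower the amount
;; is masked explicitly (nill 31/15/7).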

function %ushr_i32_reg(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = ushr.i32 v0, v1
    return v2
}

; block0:
; lgr %r5, %r3
; nill %r5, 31
; srlk %r2, %r2, 0(%r5)
; br %r14

function %ushr_i32_imm(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 17
    v2 = ushr.i32 v0, v1
    return v2
}

; block0:
; srlk %r2, %r2, 17
; br %r14

function %ushr_i16_vr(i16, i128) -> i16 {
block0(v0: i16, v1: i128):
    v2 = ushr.i16 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llhr %r2, %r2
; vlgvg %r5, %v2, 1
; nill %r5, 15
; srlk %r2, %r2, 0(%r5)
; br %r14

function %ushr_i16_reg(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
    v2 = ushr.i16 v0, v1
    return v2
}

; block0:
; llhr %r5, %r2
; nill %r3, 15
; srlk %r2, %r5, 0(%r3)
; br %r14

function %ushr_i16_imm(i16) -> i16 {
block0(v0: i16):
    v1 = iconst.i32 10
    v2 = ushr.i16 v0, v1
    return v2
}

; block0:
; llhr %r4, %r2
; srlk %r2, %r4, 10
; br %r14

function %ushr_i8_vr(i8, i128) -> i8 {
block0(v0: i8, v1: i128):
    v2 = ushr.i8 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; llcr %r2, %r2
; vlgvg %r5, %v2, 1
; nill %r5, 7
; srlk %r2, %r2, 0(%r5)
; br %r14

function %ushr_i8_reg(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
    v2 = ushr.i8 v0, v1
    return v2
}

; block0:
; llcr %r5, %r2
; nill %r3, 7
; srlk %r2, %r5, 0(%r3)
; br %r14

function %ushr_i8_imm(i8) -> i8 {
block0(v0: i8):
    v1 = iconst.i32 3
    v2 = ushr.i8 v0, v1
    return v2
}

; block0:
; llcr %r4, %r2
; srlk %r2, %r4, 3
; br %r14

function %ishl_i128_vr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
    v2 = ishl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vl %v3, 0(%r4)
; vrepb %v6, %v3, 15
; vslb %v16, %v1, %v6
; vsl %v18, %v16, %v6
; vst %v18, 0(%r2)
; br %r14

function %ishl_i128_reg(i128, i64) -> i128 {
block0(v0: i128, v1: i64):
    v2 = ishl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vlvgb %v5, %r4, 0
; vrepb %v7, %v5, 0
; vslb %v17, %v1, %v7
; vsl %v19, %v17, %v7
; vst %v19, 0(%r2)
; br %r14

function %ishl_i128_imm(i128) -> i128 {
block0(v0: i128):
    v1 = iconst.i32 17
    v2 = ishl.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vrepib %v4, 17
; vslb %v6, %v1, %v4
; vsl %v16, %v6, %v4
; vst %v16, 0(%r2)
; br %r14

function %ishl_i64_vr(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2 = ishl.i64 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; sllg %r2, %r2, 0(%r3)
; br %r14

function %ishl_i64_reg(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = ishl.i64 v0, v1
    return v2
}

; block0:
; sllg %r2, %r2, 0(%r3)
; br %r14

function %ishl_i64_imm(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i32 17
    v2 = ishl.i64 v0, v1
    return v2
}

; block0:
; sllg %r2, %r2, 17
; br %r14

function %ishl_i32_vr(i32, i128) -> i32 {
block0(v0: i32, v1: i128):
    v2 = ishl.i32 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; nill %r3, 31
; sllk %r2, %r2, 0(%r3)
; br %r14

function %ishl_i32_reg(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = ishl.i32 v0, v1
    return v2
}

; block0:
; lgr %r5, %r3
; nill %r5, 31
; sllk %r2, %r2, 0(%r5)
; br %r14

function %ishl_i32_imm(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 17
    v2 = ishl.i32 v0, v1
    return v2
}

; block0:
; sllk %r2, %r2, 17
; br %r14

function %ishl_i16_vr(i16, i128) -> i16 {
block0(v0: i16, v1: i128):
    v2 = ishl.i16 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; nill %r3, 15
; sllk %r2, %r2, 0(%r3)
; br %r14

function %ishl_i16_reg(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
    v2 = ishl.i16 v0, v1
    return v2
}

; block0:
; lgr %r5, %r3
; nill %r5, 15
; sllk %r2, %r2, 0(%r5)
; br %r14

function %ishl_i16_imm(i16) -> i16 {
block0(v0: i16):
    v1 = iconst.i32 10
    v2 = ishl.i16 v0, v1
    return v2
}

; block0:
; sllk %r2, %r2, 10
; br %r14

function %ishl_i8_vr(i8, i128) -> i8 {
block0(v0: i8, v1: i128):
    v2 = ishl.i8 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; nill %r3, 7
; sllk %r2, %r2, 0(%r3)
; br %r14

function %ishl_i8_reg(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
    v2 = ishl.i8 v0, v1
    return v2
}

; block0:
; lgr %r5, %r3
; nill %r5, 7
; sllk %r2, %r2, 0(%r5)
; br %r14

function %ishl_i8_imm(i8) -> i8 {
block0(v0: i8):
    v1 = iconst.i32 3
    v2 = ishl.i8 v0, v1
    return v2
}

; block0:
; sllk %r2, %r2, 3
; br %r14

function %sshr_i128_vr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
    v2 = sshr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vl %v3, 0(%r4)
; vrepb %v6, %v3, 15
; vsrab %v16, %v1, %v6
; vsra %v18, %v16, %v6
; vst %v18, 0(%r2)
; br %r14

function %sshr_i128_reg(i128, i64) -> i128 {
block0(v0: i128, v1: i64):
    v2 = sshr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vlvgb %v5, %r4, 0
; vrepb %v7, %v5, 0
; vsrab %v17, %v1, %v7
; vsra %v19, %v17, %v7
; vst %v19, 0(%r2)
; br %r14

function %sshr_i128_imm(i128) -> i128 {
block0(v0: i128):
    v1 = iconst.i32 17
    v2 = sshr.i128 v0, v1
    return v2
}

; block0:
; vl %v1, 0(%r3)
; vrepib %v4, 17
; vsrab %v6, %v1, %v4
; vsra %v16, %v6, %v4
; vst %v16, 0(%r2)
; br %r14

function %sshr_i64_vr(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
    v2 = sshr.i64 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; srag %r2, %r2, 0(%r3)
; br %r14

function %sshr_i64_reg(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = sshr.i64 v0, v1
    return v2
}

; block0:
; srag %r2, %r2, 0(%r3)
; br %r14

function %sshr_i64_imm(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i32 17
    v2 = sshr.i64 v0, v1
    return v2
}

; block0:
; srag %r2, %r2, 17
; br %r14

function %sshr_i32_vr(i32, i128) -> i32 {
block0(v0: i32, v1: i128):
    v2 = sshr.i32 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; vlgvg %r3, %v2, 1
; nill %r3, 31
; srak %r2, %r2, 0(%r3)
; br %r14

function %sshr_i32_reg(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
    v2 = sshr.i32 v0, v1
    return v2
}

; block0:
; lgr %r5, %r3
; nill %r5, 31
; srak %r2, %r2, 0(%r5)
; br %r14

function %sshr_i32_imm(i32) -> i32 {
block0(v0: i32):
    v1 = iconst.i32 17
    v2 = sshr.i32 v0, v1
    return v2
}

; block0:
; srak %r2, %r2, 17
; br %r14

function %sshr_i16_vr(i16, i128) -> i16 {
block0(v0: i16, v1: i128):
    v2 = sshr.i16 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; lhr %r2, %r2
; vlgvg %r5, %v2, 1
; nill %r5, 15
; srak %r2, %r2, 0(%r5)
; br %r14
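
;; sshr differs from ushr only in the extension and shift used: the narrow
;; input is sign-extended (lhr/lbr rather than llhr/llcr) and shifted
;; arithmetically (srak/srag), so the sign bit is replicated into the result.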

function %sshr_i16_reg(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
    v2 = sshr.i16 v0, v1
    return v2
}

; block0:
; lhr %r5, %r2
; nill %r3, 15
; srak %r2, %r5, 0(%r3)
; br %r14

function %sshr_i16_imm(i16) -> i16 {
block0(v0: i16):
    v1 = iconst.i32 10
    v2 = sshr.i16 v0, v1
    return v2
}

; block0:
; lhr %r4, %r2
; srak %r2, %r4, 10
; br %r14

function %sshr_i8_vr(i8, i128) -> i8 {
block0(v0: i8, v1: i128):
    v2 = sshr.i8 v0, v1
    return v2
}

; block0:
; vl %v2, 0(%r3)
; lbr %r2, %r2
; vlgvg %r5, %v2, 1
; nill %r5, 7
; srak %r2, %r2, 0(%r5)
; br %r14

function %sshr_i8_reg(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
    v2 = sshr.i8 v0, v1
    return v2
}

; block0:
; lbr %r5, %r2
; nill %r3, 7
; srak %r2, %r5, 0(%r3)
; br %r14

function %sshr_i8_imm(i8) -> i8 {
block0(v0: i8):
    v1 = iconst.i32 3
    v2 = sshr.i8 v0, v1
    return v2
}

; block0:
; lbr %r4, %r2
; srak %r2, %r4, 3
; br %r14