Use the capstone library to disassemble precise-output tests, in addition to pretty-printing their vcode.
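For context, the "; Disassembled:" comments in the expectations below come from running capstone over the machine code that the compile test emits, alongside the compiler's own VCode pretty-printing. The following is a minimal Rust sketch of driving such a disassembly with the capstone crate's SystemZ support; the function name, the byte buffer, and the start address are illustrative placeholders, and the exact builder options used by the real test harness may differ.

// Sketch: disassemble an s390x code buffer with the capstone crate.
// `code` is a placeholder byte slice, not output taken from these tests.
use capstone::prelude::*;

fn disassemble_s390x(code: &[u8]) -> Result<(), capstone::Error> {
    let cs = Capstone::new()
        .sysz()
        .mode(arch::sysz::ArchMode::Default)
        .build()?;
    // Disassemble the whole buffer starting at address 0x0, which is what
    // the "; block0: ; offset 0x0" headers below are relative to.
    for insn in cs.disasm_all(code, 0x0)?.iter() {
        println!(
            "; {} {}",
            insn.mnemonic().unwrap_or(""),
            insn.op_str().unwrap_or("")
        );
    }
    Ok(())
}
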
test compile precise-output
target s390x

function %uextend_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = uextend.i128 v0
return v1
}

; VCode:
; block0:
; vgbm %v4, 0
; vlvgg %v4, %r3, 1
; vst %v4, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vzero %v4
; vlvgg %v4, %r3, 1
; vst %v4, 0(%r2)
; br %r14

function %uextend_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = uextend.i128 v0
return v1
}

; VCode:
; block0:
; vgbm %v4, 0
; vlvgf %v4, %r3, 3
; vst %v4, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vzero %v4
; vlvgf %v4, %r3, 3
; vst %v4, 0(%r2)
; br %r14

function %uextend_i32_i64(i32) -> i64 {
block0(v0: i32):
v1 = uextend.i64 v0
return v1
}

; VCode:
; block0:
; llgfr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llgfr %r2, %r2
; br %r14

function %uextend_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = uextend.i128 v0
return v1
}

; VCode:
; block0:
; vgbm %v4, 0
; vlvgh %v4, %r3, 7
; vst %v4, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vzero %v4
; vlvgh %v4, %r3, 7
; vst %v4, 0(%r2)
; br %r14

function %uextend_i16_i64(i16) -> i64 {
block0(v0: i16):
v1 = uextend.i64 v0
return v1
}

; VCode:
; block0:
; llghr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llghr %r2, %r2
; br %r14

function %uextend_i16_i32(i16) -> i32 {
block0(v0: i16):
v1 = uextend.i32 v0
return v1
}

; VCode:
; block0:
; llhr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llhr %r2, %r2
; br %r14

function %uextend_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = uextend.i128 v0
return v1
}

; VCode:
; block0:
; vgbm %v4, 0
; vlvgb %v4, %r3, 15
; vst %v4, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vzero %v4
; vlvgb %v4, %r3, 0xf
; vst %v4, 0(%r2)
; br %r14

function %uextend_i8_i64(i8) -> i64 {
block0(v0: i8):
v1 = uextend.i64 v0
return v1
}

; VCode:
; block0:
; llgcr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llgcr %r2, %r2
; br %r14

function %uextend_i8_i32(i8) -> i32 {
block0(v0: i8):
v1 = uextend.i32 v0
return v1
}

; VCode:
; block0:
; llcr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llcr %r2, %r2
; br %r14

function %uextend_i8_i16(i8) -> i16 {
block0(v0: i8):
v1 = uextend.i16 v0
return v1
}

; VCode:
; block0:
; llcr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; llcr %r2, %r2
; br %r14

function %sextend_i64_i128(i64) -> i128 {
block0(v0: i64):
v1 = sextend.i128 v0
return v1
}

; VCode:
; block0:
; srag %r5, %r3, 63
; vlvgp %v5, %r5, %r3
; vst %v5, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; srag %r5, %r3, 0x3f
; vlvgp %v5, %r5, %r3
; vst %v5, 0(%r2)
; br %r14

function %sextend_i32_i128(i32) -> i128 {
block0(v0: i32):
v1 = sextend.i128 v0
return v1
}

; VCode:
; block0:
; lgfr %r5, %r3
; srag %r3, %r5, 63
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgfr %r5, %r3
; srag %r3, %r5, 0x3f
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14

function %sextend_i32_i64(i32) -> i64 {
block0(v0: i32):
v1 = sextend.i64 v0
return v1
}

; VCode:
; block0:
; lgfr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgfr %r2, %r2
; br %r14

function %sextend_i16_i128(i16) -> i128 {
block0(v0: i16):
v1 = sextend.i128 v0
return v1
}

; VCode:
; block0:
; lghr %r5, %r3
; srag %r3, %r5, 63
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lghr %r5, %r3
; srag %r3, %r5, 0x3f
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14

function %sextend_i16_i64(i16) -> i64 {
block0(v0: i16):
v1 = sextend.i64 v0
return v1
}

; VCode:
; block0:
; lghr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lghr %r2, %r2
; br %r14

function %sextend_i16_i32(i16) -> i32 {
block0(v0: i16):
v1 = sextend.i32 v0
return v1
}

; VCode:
; block0:
; lhr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r2, %r2
; br %r14

function %sextend_i8_i128(i8) -> i128 {
block0(v0: i8):
v1 = sextend.i128 v0
return v1
}

; VCode:
; block0:
; lgbr %r5, %r3
; srag %r3, %r5, 63
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgbr %r5, %r3
; srag %r3, %r5, 0x3f
; vlvgp %v7, %r3, %r5
; vst %v7, 0(%r2)
; br %r14

function %sextend_i8_i64(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
return v1
}

; VCode:
; block0:
; lgbr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgbr %r2, %r2
; br %r14

function %sextend_i8_i32(i8) -> i32 {
block0(v0: i8):
v1 = sextend.i32 v0
return v1
}

; VCode:
; block0:
; lbr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r2, %r2
; br %r14

function %sextend_i8_i16(i8) -> i16 {
block0(v0: i8):
v1 = sextend.i16 v0
return v1
}

; VCode:
; block0:
; lbr %r2, %r2
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r2, %r2
; br %r14

function %ireduce_i128_i64(i128) -> i64 {
block0(v0: i128):
v1 = ireduce.i64 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14

function %ireduce_i128_i32(i128) -> i32 {
block0(v0: i128):
v1 = ireduce.i32 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14

function %ireduce_i128_i16(i128) -> i16 {
block0(v0: i128):
v1 = ireduce.i16 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14

function %ireduce_i128_i8(i128) -> i8 {
block0(v0: i128):
v1 = ireduce.i8 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vlgvg %r2, %v1, 1
; br %r14

function %ireduce_i64_i32(i64, i64) -> i32 {
block0(v0: i64, v1: i64):
v2 = ireduce.i32 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %ireduce_i64_i16(i64, i64) -> i16 {
block0(v0: i64, v1: i64):
v2 = ireduce.i16 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %ireduce_i64_i8(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = ireduce.i8 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %ireduce_i32_i16(i32, i32) -> i16 {
block0(v0: i32, v1: i32):
v2 = ireduce.i16 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %ireduce_i32_i8(i32, i32) -> i8 {
block0(v0: i32, v1: i32):
v2 = ireduce.i8 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %ireduce_i16_i8(i16, i16) -> i8 {
block0(v0: i16, v1: i16):
v2 = ireduce.i8 v1
return v2
}

; VCode:
; block0:
; lgr %r2, %r3
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lgr %r2, %r3
; br %r14

function %bmask_i128_i128(i128) -> i128 {
block0(v0: i128):
v1 = bmask.i128 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r3)
; vgbm %v4, 0
; vceqgs %v6, %v1, %v4
; lghi %r3, 0
; locghine %r3, -1
; vlvgp %v20, %r3, %r3
; vst %v20, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r3)
; vzero %v4
; vceqgs %v6, %v1, %v4
; lghi %r3, 0
; locghine %r3, -1
; vlvgp %v20, %r3, %r3
; vst %v20, 0(%r2)
; br %r14

function %bmask_i128_i64(i128) -> i64 {
block0(v0: i128):
v1 = bmask.i64 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vgbm %v3, 0
; vceqgs %v5, %v1, %v3
; lghi %r2, 0
; locghine %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vzero %v3
; vceqgs %v5, %v1, %v3
; lghi %r2, 0
; locghine %r2, -1
; br %r14

function %bmask_i128_i32(i128) -> i32 {
block0(v0: i128):
v1 = bmask.i32 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vgbm %v3, 0
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vzero %v3
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14

function %bmask_i128_i16(i128) -> i16 {
block0(v0: i128):
v1 = bmask.i16 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vgbm %v3, 0
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vzero %v3
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14

function %bmask_i128_i8(i128) -> i8 {
block0(v0: i128):
v1 = bmask.i8 v0
return v1
}

; VCode:
; block0:
; vl %v1, 0(%r2)
; vgbm %v3, 0
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; vl %v1, 0(%r2)
; vzero %v3
; vceqgs %v5, %v1, %v3
; lhi %r2, 0
; lochine %r2, -1
; br %r14

function %bmask_i64_i128(i64, i64) -> i128 {
block0(v0: i64, v1: i64):
v2 = bmask.i128 v1
return v2
}

; VCode:
; block0:
; cghi %r4, 0
; lghi %r4, 0
; locghilh %r4, -1
; vlvgp %v17, %r4, %r4
; vst %v17, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; cghi %r4, 0
; lghi %r4, 0
; locghilh %r4, -1
; vlvgp %v17, %r4, %r4
; vst %v17, 0(%r2)
; br %r14

function %bmask_i64_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bmask.i64 v1
return v2
}

; VCode:
; block0:
; cghi %r3, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; cghi %r3, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14

function %bmask_i64_i32(i64, i64) -> i32 {
block0(v0: i64, v1: i64):
v2 = bmask.i32 v1
return v2
}

; VCode:
; block0:
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i64_i16(i64, i64) -> i16 {
block0(v0: i64, v1: i64):
v2 = bmask.i16 v1
return v2
}

; VCode:
; block0:
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i64_i8(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = bmask.i8 v1
return v2
}

; VCode:
; block0:
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; cghi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i32_i128(i32, i32) -> i128 {
block0(v0: i32, v1: i32):
v2 = bmask.i128 v1
return v2
}

; VCode:
; block0:
; chi %r4, 0
; lghi %r4, 0
; locghilh %r4, -1
; vlvgp %v17, %r4, %r4
; vst %v17, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; chi %r4, 0
; lghi %r4, 0
; locghilh %r4, -1
; vlvgp %v17, %r4, %r4
; vst %v17, 0(%r2)
; br %r14

function %bmask_i32_i64(i32, i32) -> i64 {
block0(v0: i32, v1: i32):
v2 = bmask.i64 v1
return v2
}

; VCode:
; block0:
; chi %r3, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; chi %r3, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14

function %bmask_i32_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bmask.i32 v1
return v2
}

; VCode:
; block0:
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i32_i16(i32, i32) -> i16 {
block0(v0: i32, v1: i32):
v2 = bmask.i16 v1
return v2
}

; VCode:
; block0:
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i32_i8(i32, i32) -> i8 {
block0(v0: i32, v1: i32):
v2 = bmask.i8 v1
return v2
}

; VCode:
; block0:
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; chi %r3, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i16_i128(i16, i16) -> i128 {
block0(v0: i16, v1: i16):
v2 = bmask.i128 v1
return v2
}

; VCode:
; block0:
; lhr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14

function %bmask_i16_i64(i16, i16) -> i64 {
block0(v0: i16, v1: i16):
v2 = bmask.i64 v1
return v2
}

; VCode:
; block0:
; lhr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14

function %bmask_i16_i32(i16, i16) -> i32 {
block0(v0: i16, v1: i16):
v2 = bmask.i32 v1
return v2
}

; VCode:
; block0:
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i16_i16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = bmask.i16 v1
return v2
}

; VCode:
; block0:
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i16_i8(i16, i16) -> i8 {
block0(v0: i16, v1: i16):
v2 = bmask.i8 v1
return v2
}

; VCode:
; block0:
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lhr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i128(i8, i8) -> i128 {
block0(v0: i8, v1: i8):
v2 = bmask.i128 v1
return v2
}

; VCode:
; block0:
; lbr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14

function %bmask_i8_i64(i8, i8) -> i64 {
block0(v0: i8, v1: i8):
v2 = bmask.i64 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14

function %bmask_i8_i32(i8, i8) -> i32 {
block0(v0: i8, v1: i8):
v2 = bmask.i32 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i16(i8, i8) -> i16 {
block0(v0: i8, v1: i8):
v2 = bmask.i16 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = bmask.i8 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i128(i8, i8) -> i128 {
block0(v0: i8, v1: i8):
v2 = bmask.i128 v1
return v2
}

; VCode:
; block0:
; lbr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r3, %r4
; chi %r3, 0
; lghi %r3, 0
; locghilh %r3, -1
; vlvgp %v19, %r3, %r3
; vst %v19, 0(%r2)
; br %r14

function %bmask_i8_i64(i8, i8) -> i64 {
block0(v0: i8, v1: i8):
v2 = bmask.i64 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lghi %r2, 0
; locghilh %r2, -1
; br %r14

function %bmask_i8_i32(i8, i8) -> i32 {
block0(v0: i8, v1: i8):
v2 = bmask.i32 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i16(i8, i8) -> i16 {
block0(v0: i8, v1: i8):
v2 = bmask.i16 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14

function %bmask_i8_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = bmask.i8 v1
return v2
}

; VCode:
; block0:
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14
;
; Disassembled:
; block0: ; offset 0x0
; lbr %r5, %r3
; chi %r5, 0
; lhi %r2, 0
; lochilh %r2, -1
; br %r14