test compile precise-output
target s390x arch13
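
;; Vector load/store lowering tests for s390x with the arch13 facility,
;; covering big-endian and little-endian accesses.

;; Big-endian extending loads: a 64-bit ld into %f2 (the high half of %v2),
;; then a vector unpack (vuplh* zero-extends, vuph* sign-extends).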

function %uload8x8_big(i64) -> i16x8 {
block0(v0: i64):
  v1 = uload8x8 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuplhb %v24, %v2
;   br %r14

function %uload16x4_big(i64) -> i32x4 {
block0(v0: i64):
  v1 = uload16x4 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuplhh %v24, %v2
;   br %r14

function %uload32x2_big(i64) -> i64x2 {
block0(v0: i64):
  v1 = uload32x2 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuplhf %v24, %v2
;   br %r14

function %sload8x8_big(i64) -> i16x8 {
block0(v0: i64):
  v1 = sload8x8 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuphb %v24, %v2
;   br %r14

function %sload16x4_big(i64) -> i32x4 {
block0(v0: i64):
  v1 = sload16x4 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuphh %v24, %v2
;   br %r14

function %sload32x2_big(i64) -> i64x2 {
block0(v0: i64):
  v1 = sload32x2 big v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuphf %v24, %v2
;   br %r14
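
;; Big-endian full-width loads: a single vl, since vector lane order matches
;; big-endian memory order. The i128 result is returned through a pointer,
;; hence the extra vst to 0(%r2).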

function %load_i8x16_big(i64) -> i8x16 {
block0(v0: i64):
  v1 = load.i8x16 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_i16x8_big(i64) -> i16x8 {
block0(v0: i64):
  v1 = load.i16x8 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_i32x4_big(i64) -> i32x4 {
block0(v0: i64):
  v1 = load.i32x4 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_i64x2_big(i64) -> i64x2 {
block0(v0: i64):
  v1 = load.i64x2 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_i128_big(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 big v0
  return v1
}

; block0:
;   vl %v3, 0(%r3)
;   vst %v3, 0(%r2)
;   br %r14

function %load_f32x4_big(i64) -> f32x4 {
block0(v0: i64):
  v1 = load.f32x4 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_f64x2_big(i64) -> f64x2 {
block0(v0: i64):
  v1 = load.f64x2 big v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14
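
;; Big-endian stores: a single vst. The i128 argument is passed by reference,
;; so it is loaded with vl before being stored.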

function %store_i8x16_big(i8x16, i64) {
block0(v0: i8x16, v1: i64):
  store.i8x16 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_i16x8_big(i16x8, i64) {
block0(v0: i16x8, v1: i64):
  store.i16x8 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_i32x4_big(i32x4, i64) {
block0(v0: i32x4, v1: i64):
  store.i32x4 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_i64x2_big(i64x2, i64) {
block0(v0: i64x2, v1: i64):
  store.i64x2 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_i128_big(i128, i64) {
block0(v0: i128, v1: i64):
  store.i128 big v0, v1
  return
}

; block0:
;   vl %v1, 0(%r2)
;   vst %v1, 0(%r3)
;   br %r14

function %store_f32x4_big(f32x4, i64) {
block0(v0: f32x4, v1: i64):
  store.f32x4 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_f64x2_big(f64x2, i64) {
block0(v0: f64x2, v1: i64):
  store.f64x2 big v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14
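
;; Little-endian extending loads: the 64-bit chunk is loaded and, for 16- and
;; 32-bit elements, byte-swapped into lane order (verll* rotates, or a vlebrg
;; byte-reversed element load) before the unpack.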

function %uload8x8_little(i64) -> i16x8 {
block0(v0: i64):
  v1 = uload8x8 little v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuplhb %v24, %v2
;   br %r14

function %uload16x4_little(i64) -> i32x4 {
block0(v0: i64):
  v1 = uload16x4 little v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   verllh %v4, %v2, 8
;   vuplhh %v24, %v4
;   br %r14

function %uload32x2_little(i64) -> i64x2 {
block0(v0: i64):
  v1 = uload32x2 little v0
  return v1
}

; block0:
;   vlebrg %v2, 0(%r2), 0
;   verllg %v4, %v2, 32
;   vuplhf %v24, %v4
;   br %r14

function %sload8x8_little(i64) -> i16x8 {
block0(v0: i64):
  v1 = sload8x8 little v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   vuphb %v24, %v2
;   br %r14

function %sload16x4_little(i64) -> i32x4 {
block0(v0: i64):
  v1 = sload16x4 little v0
  return v1
}

; block0:
;   ld %f2, 0(%r2)
;   verllh %v4, %v2, 8
;   vuphh %v24, %v4
;   br %r14

function %sload32x2_little(i64) -> i64x2 {
block0(v0: i64):
  v1 = sload32x2 little v0
  return v1
}

; block0:
;   vlebrg %v2, 0(%r2), 0
;   verllg %v4, %v2, 32
;   vuphf %v24, %v4
;   br %r14
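
;; Little-endian full-width loads: arch13's byte-reversing loads
;; (vlbrh/vlbrf/vlbrg/vlbrq) swap each element as it is loaded; i8x16 needs no
;; swapping and uses a plain vl.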

function %load_i8x16_little(i64) -> i8x16 {
block0(v0: i64):
  v1 = load.i8x16 little v0
  return v1
}

; block0:
;   vl %v24, 0(%r2)
;   br %r14

function %load_i16x8_little(i64) -> i16x8 {
block0(v0: i64):
  v1 = load.i16x8 little v0
  return v1
}

; block0:
;   vlbrh %v24, 0(%r2)
;   br %r14

function %load_i32x4_little(i64) -> i32x4 {
block0(v0: i64):
  v1 = load.i32x4 little v0
  return v1
}

; block0:
;   vlbrf %v24, 0(%r2)
;   br %r14

function %load_i64x2_little(i64) -> i64x2 {
block0(v0: i64):
  v1 = load.i64x2 little v0
  return v1
}

; block0:
;   vlbrg %v24, 0(%r2)
;   br %r14

function %load_i128_little(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 little v0
  return v1
}

; block0:
;   vlbrq %v3, 0(%r3)
;   vst %v3, 0(%r2)
;   br %r14

function %load_f32x4_little(i64) -> f32x4 {
block0(v0: i64):
  v1 = load.f32x4 little v0
  return v1
}

; block0:
;   vlbrf %v24, 0(%r2)
;   br %r14

function %load_f64x2_little(i64) -> f64x2 {
block0(v0: i64):
  v1 = load.f64x2 little v0
  return v1
}

; block0:
;   vlbrg %v24, 0(%r2)
;   br %r14
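
;; Little-endian stores: the byte-reversing stores (vstbrh/vstbrf/vstbrg/vstbrq)
;; mirror the loads above; i8x16 again uses a plain vst.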

function %store_i8x16_little(i8x16, i64) {
block0(v0: i8x16, v1: i64):
  store.i8x16 little v0, v1
  return
}

; block0:
;   vst %v24, 0(%r2)
;   br %r14

function %store_i16x8_little(i16x8, i64) {
block0(v0: i16x8, v1: i64):
  store.i16x8 little v0, v1
  return
}

; block0:
;   vstbrh %v24, 0(%r2)
;   br %r14

function %store_i32x4_little(i32x4, i64) {
block0(v0: i32x4, v1: i64):
  store.i32x4 little v0, v1
  return
}

; block0:
;   vstbrf %v24, 0(%r2)
;   br %r14

function %store_i64x2_little(i64x2, i64) {
block0(v0: i64x2, v1: i64):
  store.i64x2 little v0, v1
  return
}

; block0:
;   vstbrg %v24, 0(%r2)
;   br %r14

function %store_i128_little(i128, i64) {
block0(v0: i128, v1: i64):
  store.i128 little v0, v1
  return
}

; block0:
;   vl %v1, 0(%r2)
;   vstbrq %v1, 0(%r3)
;   br %r14

function %store_f32x4_little(f32x4, i64) {
block0(v0: f32x4, v1: i64):
  store.f32x4 little v0, v1
  return
}

; block0:
;   vstbrf %v24, 0(%r2)
;   br %r14

function %store_f64x2_little(f64x2, i64) {
block0(v0: f64x2, v1: i64):
  store.f64x2 little v0, v1
  return
}

; block0:
;   vstbrg %v24, 0(%r2)
;   br %r14